localrepo: simplify requirements checking
Matt Mackall
r6895:a6bb9493 default
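
The change folds requirement checking into the read itself: requirements now starts out as an empty list, and each entry is validated inside the try block, so the post-IOError reset and the standalone "check them" loop go away. A minimal standalone sketch of the resulting pattern (the supported tuple, the requires filename, and the generic Exception are stand-ins for the repository's own attributes and RepoError):

import errno

supported = ('revlogv1', 'store')

# find and check requirements in one pass; a missing
# requires file simply leaves the list empty
requirements = []
try:
    requirements = open('requires').read().splitlines()
    for r in requirements:
        if r not in supported:
            raise Exception("requirement '%s' not supported" % r)
except IOError, inst:
    if inst.errno != errno.ENOENT:
        raise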
@@ -1,2090 +1,2088 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup
 import changelog, dirstate, filelog, manifest, context, weakref
 import lock, transaction, stat, errno, ui, store
 import os, revlog, time, util, extensions, hook, inspect
 import match as match_
 import merge as merge_

 class localrepository(repo.repository):
     capabilities = util.set(('lookup', 'changegroupsubset'))
     supported = ('revlogv1', 'store')

     def __init__(self, parentui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if parentui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     # create an invalid changelog
                     self.opener("00changelog.i", "a").write(
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise repo.RepoError(_("repository %s not found") % path)
         elif create:
             raise repo.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
+            requirements = []
             try:
                 requirements = self.opener("requires").read().splitlines()
+                for r in requirements:
+                    if r not in self.supported:
+                        raise repo.RepoError(_("requirement '%s' not supported") % r)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
-            requirements = []
-            # check them
-            for r in requirements:
-                if r not in self.supported:
-                    raise repo.RepoError(_("requirement '%s' not supported") % r)

         self.store = store.store(requirements, self.path)
-
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode

         self.ui = ui.ui(parentui=parentui)
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         self.tagscache = None
         self._tagstypecache = None
         self.branchcache = None
         self._ubranchcache = None # UTF-8 version of branchcache
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

     def __getattr__(self, name):
         if name == 'changelog':
             self.changelog = changelog.changelog(self.sopener)
             self.sopener.defversion = self.changelog.version
             return self.changelog
         if name == 'manifest':
             self.changelog
             self.manifest = manifest.manifest(self.sopener)
             return self.manifest
         if name == 'dirstate':
             self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
             return self.dirstate
         else:
             raise AttributeError, name

     def __getitem__(self, changeid):
         if changeid == None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         for i in xrange(len(self)):
             yield i

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     tag_disallowed = ':\r\n'

     def _tag(self, names, node, message, local, user, date, parent=None,
              extra={}):
         use_dirstate = parent is None

         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)

         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagstypecache and name in self._tagstypecache:
                     old = self.tagscache.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError, err:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         if use_dirstate:
             try:
                 fp = self.wfile('.hgtags', 'rb+')
             except IOError, err:
                 fp = self.wfile('.hgtags', 'ab')
             else:
                 prevtags = fp.read()
         else:
             try:
                 prevtags = self.filectx('.hgtags', parent).data()
             except revlog.LookupError:
                 pass
             fp = self.wfile('.hgtags', 'wb')
             if prevtags:
                 fp.write(prevtags)

         # committed tags are stored in UTF-8
         writetags(fp, names, util.fromlocal, prevtags)

         if use_dirstate and '.hgtags' not in self.dirstate:
             self.add(['.hgtags'])

         tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                               extra=extra)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))

         self._tag(names, node, message, local, user, date)

     def tags(self):
         '''return a mapping of tag to node'''
         if self.tagscache:
             return self.tagscache

         globaltags = {}
         tagtypes = {}

         def readtags(lines, fn, tagtype):
             filetags = {}
             count = 0

             def warn(msg):
                 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

             for l in lines:
                 count += 1
                 if not l:
                     continue
                 s = l.split(" ", 1)
                 if len(s) != 2:
                     warn(_("cannot parse entry"))
                     continue
                 node, key = s
                 key = util.tolocal(key.strip()) # stored in UTF-8
                 try:
                     bin_n = bin(node)
                 except TypeError:
                     warn(_("node '%s' is not well formed") % node)
                     continue
                 if bin_n not in self.changelog.nodemap:
                     warn(_("tag '%s' refers to unknown node") % key)
                     continue

                 h = []
                 if key in filetags:
                     n, h = filetags[key]
                     h.append(n)
                 filetags[key] = (bin_n, h)

             for k, nh in filetags.items():
                 if k not in globaltags:
                     globaltags[k] = nh
                     tagtypes[k] = tagtype
                     continue

                 # we prefer the global tag if:
                 #  it supercedes us OR
                 #  mutual supercedes and it has a higher rank
                 # otherwise we win because we're tip-most
                 an, ah = nh
                 bn, bh = globaltags[k]
                 if (bn != an and an in bh and
                     (bn not in ah or len(bh) > len(ah))):
                     an = bn
                 ah.extend([n for n in bh if n not in ah])
                 globaltags[k] = an, ah
                 tagtypes[k] = tagtype

         # read the tags file from each head, ending with the tip
         f = None
         for rev, node, fnode in self._hgtagsnodes():
             f = (f and f.filectx(fnode) or
                  self.filectx('.hgtags', fileid=fnode))
             readtags(f.data().splitlines(), f, "global")

         try:
             data = util.fromlocal(self.opener("localtags").read())
             # localtags are stored in the local character set
             # while the internal tag table is stored in UTF-8
             readtags(data.splitlines(), "localtags", "local")
         except IOError:
             pass

         self.tagscache = {}
         self._tagstypecache = {}
         for k,nh in globaltags.items():
             n = nh[0]
             if n != nullid:
                 self.tagscache[k] = n
             self._tagstypecache[k] = tagtypes[k]
         self.tagscache['tip'] = self.changelog.tip()
         return self.tagscache

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         self.tags()

         return self._tagstypecache.get(tagname)

     def _hgtagsnodes(self):
         heads = self.heads()
         heads.reverse()
         last = {}
         ret = []
         for node in heads:
             c = self[node]
             rev = c.rev()
             try:
                 fnode = c.filenode('.hgtags')
             except revlog.LookupError:
                 continue
             ret.append((rev, node, fnode))
             if fnode in last:
                 ret[last[fnode]] = None
             last[fnode] = len(ret) - 1
         return [item for item in ret if item]

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().items():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         return [(t, n) for r, t, n in util.sort(l)]

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().items():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])

     def _branchtags(self, partial, lrev):
         tiprev = len(self) - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)

         return partial

     def branchtags(self):
         tip = self.changelog.tip()
         if self.branchcache is not None and self._branchcachetip == tip:
             return self.branchcache

         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if self.branchcache is None:
             self.branchcache = {} # avoid recursion in changectx
         else:
             self.branchcache.clear() # keep using the same dict
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._ubranchcache

         self._branchtags(partial, lrev)

         # the branch cache is stored on disk as UTF-8, but in the local
         # charset internally
         for k, v in partial.items():
             self.branchcache[util.tolocal(k)] = v
         self._ubranchcache = partial
         return self.branchcache

     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branch.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev

         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l: continue
                 node, label = l.split(" ", 1)
                 partial[label.strip()] = bin(node)
         except (KeyboardInterrupt, util.SignalInterrupt):
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev

     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branch.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, node in branches.iteritems():
                 f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass

     def _updatebranchcache(self, partial, start, end):
         for r in xrange(start, end):
             c = self[r]
             b = c.branch()
             partial[b] = c.node()

     def lookup(self, key):
         if key == '.':
             return self.dirstate.parents()[0]
         elif key == 'null':
             return nullid
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise repo.RepoError(_("unknown revision '%s'") % key)

     def local(self):
         return True

     def join(self, f):
         return os.path.join(self.path, f)

     def wjoin(self, f):
         return os.path.join(self.root, f)

     def rjoin(self, f):
         return os.path.join(self.root, util.pconvert(f))

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)

     def changectx(self, changeid):
         return self[changeid]

     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
            fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)

     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)

     def _link(self, f):
         return os.path.islink(self.wjoin(f))

     def _filter(self, filter, filename, data):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 mf = util.matcher(self.root, "", [pat], [], [])[1]
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l

         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break

         return data

     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter

     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)

     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener(filename, 'w').write(data)
             if 'x' in flags:
                 util.set_flags(self.wjoin(filename), False, True)

     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)

     def transaction(self):
         if self._transref and self._transref():
             return self._transref().nest()

         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise repo.RepoError(_("journal already exists - run hg recover"))

         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())

         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr

     def recover(self):
         l = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"))
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             del l

     def rollback(self):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 self.ui.status(_("rolling back last transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("undo"))
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % util.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
             else:
                 self.ui.warn(_("no rollback information available\n"))
         finally:
             del lock, wlock

     def invalidate(self):
         for a in "changelog manifest".split():
             if a in self.__dict__:
                 delattr(self, a)
         self.tagscache = None
         self._tagstypecache = None
         self.nodetagscache = None
         self.branchcache = None
         self._ubranchcache = None
         self._branchcachetip = None

     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except lock.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l

     def lock(self, wait=True):
         if self._lockref and self._lockref():
             return self._lockref()

         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l

     def wlock(self, wait=True):
         if self._wlockref and self._wlockref():
             return self._wlockref()

         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l

     def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """

         fn = fctx.path()
         t = fctx.data()
         fl = self.file(fn)
         fp1 = manifest1.get(fn, nullid)
         fp2 = manifest2.get(fn, nullid)

         meta = {}
         cp = fctx.renamed()
         if cp and cp[0] != fn:
             # Mark the new revision of this file as a copy of another
             # file. This copy data will effectively act as a parent
             # of this new revision. If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent. For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #   \       /     merging rev3 and rev4 should use bar@rev2
             #    \- 2 --- 4        as the merge base
             #

             cf = cp[0]
             cr = manifest1.get(cf)
             nfp = fp2

             if manifest2: # branch merge
                 if fp2 == nullid: # copied on remote side
                     if fp1 != nullid or cf in manifest2:
                         cr = manifest2[cf]
                         nfp = fp1

             # find source in nearest ancestor if we've lost track
             if not cr:
                 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                               (fn, cf))
                 for a in self['.'].ancestors():
                     if cf in a:
                         cr = a[cf].filenode()
                         break

             self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
             meta["copy"] = cf
             meta["copyrev"] = hex(cr)
             fp1, fp2 = nullid, nfp
         elif fp2 != nullid:
             # is one parent an ancestor of the other?
             fpa = fl.ancestor(fp1, fp2)
             if fpa == fp1:
                 fp1, fp2 = fp2, nullid
             elif fpa == fp2:
                 fp2 = nullid

         # is the file unmodified from the parent? report existing entry
         if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
             return fp1

         changelist.append(fn)
         return fl.add(t, meta, tr, linkrev, fp1, fp2)

     def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
         if p1 is None:
             p1, p2 = self.dirstate.parents()
         return self.commit(files=files, text=text, user=user, date=date,
                            p1=p1, p2=p2, extra=extra, empty_ok=True)

     def commit(self, files=None, text="", user=None, date=None,
                match=None, force=False, force_editor=False,
                p1=None, p2=None, extra={}, empty_ok=False):
         wlock = lock = None
         if files:
             files = util.unique(files)
         try:
             wlock = self.wlock()
             lock = self.lock()
             use_dirstate = (p1 is None) # not rawcommit

             if use_dirstate:
                 p1, p2 = self.dirstate.parents()
                 update_dirstate = True

                 if (not force and p2 != nullid and
                     (match and (match.files() or match.anypats()))):
                     raise util.Abort(_('cannot partially commit a merge '
                                        '(do not specify files or patterns)'))

                 if files:
                     modified, removed = [], []
                     for f in files:
                         s = self.dirstate[f]
                         if s in 'nma':
                             modified.append(f)
                         elif s == 'r':
                             removed.append(f)
                         else:
                             self.ui.warn(_("%s not tracked!\n") % f)
                     changes = [modified, [], removed, [], []]
                 else:
                     changes = self.status(match=match)
             else:
                 p1, p2 = p1, p2 or nullid
                 update_dirstate = (self.dirstate.parents()[0] == p1)
                 changes = [files, [], [], [], []]

             ms = merge_.mergestate(self)
             for f in changes[0]:
                 if f in ms and ms[f] == 'u':
                     raise util.Abort(_("unresolved merge conflicts "
                                        "(see hg resolve)"))
             wctx = context.workingctx(self, (p1, p2), text, user, date,
                                       extra, changes)
             return self._commitctx(wctx, force, force_editor, empty_ok,
                                    use_dirstate, update_dirstate)
         finally:
             del lock, wlock

     def commitctx(self, ctx):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             return self._commitctx(ctx, force=True, force_editor=False,
                                    empty_ok=True, use_dirstate=False,
                                    update_dirstate=False)
         finally:
             del lock, wlock

     def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                    use_dirstate=True, update_dirstate=True):
         tr = None
         valid = 0 # don't save the dirstate if this isn't set
         try:
             commit = util.sort(wctx.modified() + wctx.added())
             remove = wctx.removed()
             extra = wctx.extra().copy()
             branchname = extra['branch']
             user = wctx.user()
             text = wctx.description()

             p1, p2 = [p.node() for p in wctx.parents()]
             c1 = self.changelog.read(p1)
             c2 = self.changelog.read(p2)
             m1 = self.manifest.read(c1[0]).copy()
             m2 = self.manifest.read(c2[0])

             if use_dirstate:
                 oldname = c1[5].get("branch") # stored in UTF-8
                 if (not commit and not remove and not force and p2 == nullid
                     and branchname == oldname):
                     self.ui.status(_("nothing changed\n"))
                     return None

             xp1 = hex(p1)
             if p2 == nullid: xp2 = ''
             else: xp2 = hex(p2)

             self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

             tr = self.transaction()
             trp = weakref.proxy(tr)

             # check in files
             new = {}
             changed = []
             linkrev = len(self)
             for f in commit:
                 self.ui.note(f + "\n")
                 try:
                     fctx = wctx.filectx(f)
                     newflags = fctx.flags()
                     new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                     if ((not changed or changed[-1] != f) and
                         m2.get(f) != new[f]):
                         # mention the file in the changelog if some
                         # flag changed, even if there was no content
                         # change.
                         if m1.flags(f) != newflags:
                             changed.append(f)
                     m1.set(f, newflags)
                     if use_dirstate:
                         self.dirstate.normal(f)

                 except (OSError, IOError):
                     if use_dirstate:
                         self.ui.warn(_("trouble committing %s!\n") % f)
                         raise
                     else:
                         remove.append(f)

             # update manifest
             m1.update(new)
             removed = []

             for f in util.sort(remove):
                 if f in m1:
                     del m1[f]
                     removed.append(f)
                 elif f in m2:
                     removed.append(f)
             mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                    (new, removed))

             # add changeset
             if (not empty_ok and not text) or force_editor:
                 edittext = []
                 if text:
                     edittext.append(text)
                 edittext.append("")
                 edittext.append(_("HG: Enter commit message."
                                   " Lines beginning with 'HG:' are removed."))
                 edittext.append("HG: --")
                 edittext.append("HG: user: %s" % user)
                 if p2 != nullid:
                     edittext.append("HG: branch merge")
                 if branchname:
                     edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                 edittext.extend(["HG: changed %s" % f for f in changed])
                 edittext.extend(["HG: removed %s" % f for f in removed])
                 if not changed and not remove:
                     edittext.append("HG: no files changed")
                 edittext.append("")
                 # run editor in the repository root
                 olddir = os.getcwd()
                 os.chdir(self.root)
                 text = self.ui.edit("\n".join(edittext), user)
                 os.chdir(olddir)

             lines = [line.rstrip() for line in text.rstrip().splitlines()]
             while lines and not lines[0]:
                 del lines[0]
             if not lines and use_dirstate:
                 raise util.Abort(_("empty commit message"))
             text = '\n'.join(lines)

             n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                    user, wctx.date(), extra)
             self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                       parent2=xp2)
             tr.close()

             if self.branchcache:
                 self.branchtags()

             if use_dirstate or update_dirstate:
                 self.dirstate.setparents(n)
                 if use_dirstate:
                     for f in removed:
                         self.dirstate.forget(f)
             valid = 1 # our dirstate updates are complete

             self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
             return n
         finally:
             if not valid: # don't save our updated dirstate
                 self.dirstate.invalidate()
             del tr

933 def walk(self, match, node=None):
931 def walk(self, match, node=None):
934 '''
932 '''
935 walk recursively through the directory tree or a given
933 walk recursively through the directory tree or a given
936 changeset, finding all files matched by the match
934 changeset, finding all files matched by the match
937 function
935 function
938 '''
936 '''
939 return self[node].walk(match)
937 return self[node].walk(match)
940
938
941 def status(self, node1='.', node2=None, match=None,
939 def status(self, node1='.', node2=None, match=None,
942 ignored=False, clean=False, unknown=False):
940 ignored=False, clean=False, unknown=False):
943 """return status of files between two nodes or node and working directory
941 """return status of files between two nodes or node and working directory
944
942
945 If node1 is None, use the first dirstate parent instead.
943 If node1 is None, use the first dirstate parent instead.
946 If node2 is None, compare node1 with working directory.
944 If node2 is None, compare node1 with working directory.
947 """
945 """
948
946
949 def mfmatches(ctx):
947 def mfmatches(ctx):
950 mf = ctx.manifest().copy()
948 mf = ctx.manifest().copy()
951 for fn in mf.keys():
949 for fn in mf.keys():
952 if not match(fn):
950 if not match(fn):
953 del mf[fn]
951 del mf[fn]
954 return mf
952 return mf
955
953
956 ctx1 = self[node1]
954 ctx1 = self[node1]
957 ctx2 = self[node2]
955 ctx2 = self[node2]
958 working = ctx2 == self[None]
956 working = ctx2 == self[None]
959 parentworking = working and ctx1 == self['.']
957 parentworking = working and ctx1 == self['.']
960 match = match or match_.always(self.root, self.getcwd())
958 match = match or match_.always(self.root, self.getcwd())
961 listignored, listclean, listunknown = ignored, clean, unknown
959 listignored, listclean, listunknown = ignored, clean, unknown
962
960
963 if working: # we need to scan the working dir
961 if working: # we need to scan the working dir
964 s = self.dirstate.status(match, listignored, listclean, listunknown)
962 s = self.dirstate.status(match, listignored, listclean, listunknown)
965 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
963 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
966
964
967 # check for any possibly clean files
965 # check for any possibly clean files
968 if parentworking and cmp:
966 if parentworking and cmp:
969 fixup = []
967 fixup = []
970 # do a full compare of any files that might have changed
968 # do a full compare of any files that might have changed
971 for f in cmp:
969 for f in cmp:
972 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
970 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
973 or ctx1[f].cmp(ctx2[f].data())):
971 or ctx1[f].cmp(ctx2[f].data())):
974 modified.append(f)
972 modified.append(f)
975 else:
973 else:
976 fixup.append(f)
974 fixup.append(f)
977
975
978 if listclean:
976 if listclean:
979 clean += fixup
977 clean += fixup
980
978
981 # update dirstate for files that are actually clean
979 # update dirstate for files that are actually clean
982 if fixup:
980 if fixup:
983 wlock = None
981 wlock = None
984 try:
982 try:
985 try:
983 try:
986 wlock = self.wlock(False)
984 wlock = self.wlock(False)
987 for f in fixup:
985 for f in fixup:
988 self.dirstate.normal(f)
986 self.dirstate.normal(f)
989 except lock.LockException:
987 except lock.LockException:
990 pass
988 pass
991 finally:
989 finally:
992 del wlock
990 del wlock
993
991
994 if not parentworking:
992 if not parentworking:
995 mf1 = mfmatches(ctx1)
993 mf1 = mfmatches(ctx1)
996 if working:
994 if working:
997 # we are comparing working dir against non-parent
995 # we are comparing working dir against non-parent
998 # generate a pseudo-manifest for the working dir
996 # generate a pseudo-manifest for the working dir
999 mf2 = mfmatches(self['.'])
997 mf2 = mfmatches(self['.'])
1000 for f in cmp + modified + added:
998 for f in cmp + modified + added:
1001 mf2[f] = None
999 mf2[f] = None
1002 mf2.set(f, ctx2.flags(f))
1000 mf2.set(f, ctx2.flags(f))
1003 for f in removed:
1001 for f in removed:
1004 if f in mf2:
1002 if f in mf2:
1005 del mf2[f]
1003 del mf2[f]
1006 else:
1004 else:
1007 # we are comparing two revisions
1005 # we are comparing two revisions
1008 deleted, unknown, ignored = [], [], []
1006 deleted, unknown, ignored = [], [], []
1009 mf2 = mfmatches(ctx2)
1007 mf2 = mfmatches(ctx2)
1010
1008
1011 modified, added, clean = [], [], []
1009 modified, added, clean = [], [], []
1012 for fn in mf2:
1010 for fn in mf2:
1013 if fn in mf1:
1011 if fn in mf1:
1014 if (mf1.flags(fn) != mf2.flags(fn) or
1012 if (mf1.flags(fn) != mf2.flags(fn) or
1015 (mf1[fn] != mf2[fn] and
1013 (mf1[fn] != mf2[fn] and
1016 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1014 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1017 modified.append(fn)
1015 modified.append(fn)
1018 elif listclean:
1016 elif listclean:
1019 clean.append(fn)
1017 clean.append(fn)
1020 del mf1[fn]
1018 del mf1[fn]
1021 else:
1019 else:
1022 added.append(fn)
1020 added.append(fn)
1023 removed = mf1.keys()
1021 removed = mf1.keys()
1024
1022
1025 r = modified, added, removed, deleted, unknown, ignored, clean
1023 r = modified, added, removed, deleted, unknown, ignored, clean
1026 [l.sort() for l in r]
1024 [l.sort() for l in r]
1027 return r
1025 return r
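
    # Usage sketch for the seven-tuple returned above (all caller-side
    # names below are hypothetical, for illustration only):
    #
    #   mod, add, rem, dele, unk, ign, cln = repo.status(unknown=True)
    #   for f in mod:
    #       print 'M %s' % f    # modified vs. the dirstate parent
    #
    # Note that when two revisions are compared (the "not working" branch
    # above), the deleted, unknown and ignored lists come back empty.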

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock
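
    # Hypothetical caller-side sketch: add() returns the names it refused,
    # so callers can report failures without rescanning the working dir:
    #
    #   rejected = repo.add(['a.txt', 'missing.txt'])
    #   if rejected:
    #       ui.warn(_("not added: %s\n") % ', '.join(rejected))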

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in util.sort(heads)]
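
    # Worked example of the sort trick above: with heads at revs 3 and 7
    # (nodes n3, n7), the list is [(-3, n3), (-7, n7)]; util.sort() orders
    # it as [(-7, n7), (-3, n3)], so the newest head n7 comes back first.
    # Negating the rev number avoids passing a custom comparator.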

    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
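
    # Toy walk-through of the loop above, assuming revs 0-4 are all on
    # branch 'b' and rev 4 is the branch tip:
    #
    #   0 -- 1 -- 2 -- 4
    #         \
    #          3
    #
    #   rev 4 (tip): heads = [4], ancestors = {2}
    #   rev 3: not an ancestor, on 'b' -> heads = [4, 3], ancestors = {1, 2}
    #   rev 2: ancestor -> ancestors = {1}
    #   rev 1: ancestor -> ancestors = {0}
    #   rev 0: ancestor -> done; the branch heads are revs 4 and 3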

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
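
    # Each tuple appended above describes one linear segment of history,
    # walked from a requested node t back to the first merge or root n:
    #
    #   (t, n, p1, p2)   # segment head, segment root, root's two parents
    #
    # findincoming() below relies on exactly this shape via n[0]..n[3].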

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
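
    # The doubling of f above samples nodes at power-of-two distances from
    # top.  For a chain top = a0 -> a1 -> ... -> a9 = bottom, the reported
    # list is l = [a1, a2, a4, a8] (distances 1, 2, 4, 8), which is what
    # lets findincoming() narrow a branch range in logarithmically many
    # round trips instead of walking it revision by revision.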

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
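
    # Shape sketch for one discovery round above (nodes H, R, P1, P2 are
    # hypothetical): the remote reports a branch segment for its head H
    # with root R and root parents (P1, P2).  If R itself is already known
    # locally, the (H, R) range goes onto 'search' for the binary search
    # below; otherwise, if both parents are known, R is an earliest unknown
    # node and the dicts end up shaped like:
    #
    #   fetch = {R: 1}           # roots of the missing subsets (returned)
    #   base  = {P1: 1, P2: 1}   # common nodes, updated in place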

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
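
    # Pruning sketch with hypothetical nodes: if base = {B} and the local
    # graph is B -> C -> D, the loop above deletes B (and its ancestors)
    # from 'remain', leaving {C, D}.  C's parents then both fall outside
    # 'remain', so subset = [C] -- the single root that must be pushed.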

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
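
    # Worked example of the new-head check above, with hypothetical nodes:
    # remote has heads {R1, R2} and we push heads {L1, R2} where L1
    # descends from R1.  For R1, desc contains L1, so R1 is not appended;
    # for R2, desc contains R2 itself.  newheads stays at two entries, the
    # same as remote_heads, and no warning fires.  Any additional local
    # head would grow newheads past len(remote_heads) and set warn = 1.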

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
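
    # Shape of the extranodes argument accepted above, for illustration
    # (the filename and variable names are hypothetical): keys are
    # filenames, or the integer 1 for the manifest, and each wanted node
    # is paired with the changelog node to transmit as its linkrev:
    #
    #   extranodes = {
    #       1:       [(manifest_node, linknode)],
    #       'a.txt': [(file_node, linknode)],
    #   }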

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
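
    # Usage sketch: util.chunkbuffer wraps the generator in a file-like
    # object, so a caller can drain the group in bounded memory.  The file
    # name and the 4096-byte read size are illustrative; the raw stream also
    # lacks the header that a real bundle file would put in front of it.
    #
    #     cg = repo.changegroup(basenodes, 'bundle')
    #     out = open('changegroup.raw', 'wb')
    #     while 1:
    #         chunk = cg.read(4096)
    #         if not chunk:
    #             break
    #         out.write(chunk)
    #     out.close()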

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
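
    # Worked example of the return convention: oldheads == 1, newheads == 3
    # gives 3 - 1 + 1 = 3 (one plus two added heads); oldheads == 3,
    # newheads == 1 gives 1 - 3 - 1 = -3; an unchanged head count gives 1,
    # leaving 0 to mean "nothing changed or no source".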


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
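
    # The wire format consumed above, line by line (values illustrative):
    #
    #     0\n                      status: 0 ok, 1 forbidden, 2 lock failed
    #     2 451261\n               total_files and total_bytes
    #     data/foo.i\x001234\n     "<name>\0<size>", then exactly <size> raw bytes
    #     ...                      repeated total_files times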

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
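
    # Usage sketch (`remote` is assumed to be a peer object, `somenode` an
    # assumed changeset node):
    #
    #     repo.clone(remote, stream=True)       # streams if the server allows
    #     repo.clone(remote, heads=[somenode])  # explicit heads force the pull path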

    def storefiles(self):
        '''get all *.i and *.d files in the store

        Returns (list of (filename, size), total_bytes)'''

        lock = None
        try:
            self.ui.debug('scanning\n')
            entries = []
            total_bytes = 0
            # get consistent snapshot of repo, lock during scan
            lock = self.lock()
            for name, size in self.store.walk():
                entries.append((name, size))
                total_bytes += size
            return entries, total_bytes
        finally:
            del lock
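
    # A streaming server can serialize this result directly into the header
    # line that stream_in() above parses (a sketch; the per-file payloads
    # would follow the header):
    #
    #     entries, total = repo.storefiles()
    #     header = '%d %d\n' % (len(entries), total)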

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
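
# Returning a plain closure keeps the transaction's post-close callback free
# of references back to the repository object, so destructors can still run.
# Usage sketch (the file names are illustrative):
#
#     after = aftertrans([('journal', 'undo')])
#     after()   # renames each (src, dest) pair via util.rename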

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
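
# instance() and islocal() are the entry points the hg.repository() dispatcher
# expects from every repository module: it picks a module by URL scheme and
# calls that module's instance().  Usage sketch (`myui` is an assumed ui
# object):
#
#     from mercurial import hg
#     repo = hg.repository(myui, '/path/to/repo')  # ends up in instance() above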