merge with crew-stable
Alexis S. L. Carvalho
r4497:22ebd6ee merge default
@@ -1,312 +1,312 @@
# bisect extension for mercurial
#
# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
# Inspired by git bisect, extension skeleton taken from mq.py.
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from mercurial.i18n import _
from mercurial import hg, util, commands, cmdutil
import os, sys, sets

versionstr = "0.0.3"

def lookup_rev(ui, repo, rev=None):
    """returns rev or the checked-out revision if rev is None"""
    if not rev is None:
        return repo.lookup(rev)
    parents = [p for p in repo.dirstate.parents() if p != hg.nullid]
    if len(parents) != 1:
        raise util.Abort(_("unexpected number of parents, "
                           "please commit or revert"))
    return parents.pop()

def check_clean(ui, repo):
    modified, added, removed, deleted, unknown = repo.status()[:5]
    if modified or added or removed:
        ui.warn("Repository is not clean, please commit or revert\n")
        sys.exit(1)

class bisect(object):
    """dichotomic search in the DAG of changesets"""
    def __init__(self, ui, repo):
        self.repo = repo
        self.path = repo.join("bisect")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.goodrevs = []
        self.badrev = None
        self.good_dirty = 0
        self.bad_dirty = 0
        self.good_path = "good"
        self.bad_path = "bad"

        if os.path.exists(os.path.join(self.path, self.good_path)):
            self.goodrevs = self.opener(self.good_path).read().splitlines()
            self.goodrevs = [hg.bin(x) for x in self.goodrevs]
        if os.path.exists(os.path.join(self.path, self.bad_path)):
            r = self.opener(self.bad_path).read().splitlines()
            if r:
                self.badrev = hg.bin(r.pop(0))

    def write(self):
        if not os.path.isdir(self.path):
            return
        f = self.opener(self.good_path, "w")
        f.write("\n".join([hg.hex(r) for r in self.goodrevs]))
        if len(self.goodrevs) > 0:
            f.write("\n")
        f = self.opener(self.bad_path, "w")
        if self.badrev:
            f.write(hg.hex(self.badrev) + "\n")

    def init(self):
        """start a new bisection"""
        if os.path.isdir(self.path):
            raise util.Abort(_("bisect directory already exists\n"))
        os.mkdir(self.path)
        check_clean(self.ui, self.repo)
        return 0

    def reset(self):
        """finish a bisection"""
        if os.path.isdir(self.path):
            sl = [os.path.join(self.path, p)
                  for p in [self.bad_path, self.good_path]]
            for s in sl:
                if os.path.exists(s):
                    os.unlink(s)
            os.rmdir(self.path)
        # Not sure about this
        #self.ui.write("Going back to tip\n")
        #self.repo.update(self.repo.changelog.tip())
        return 1

    def num_ancestors(self, head=None, stop=None):
        """
        returns a dict with the mapping:
        node -> number of ancestors (self included)
        for all nodes who are ancestor of head and
        not in stop.
        """
        if head is None:
            head = self.badrev
        return self.__ancestors_and_nb_ancestors(head, stop)[1]

    def ancestors(self, head=None, stop=None):
        """
        returns the set of the ancestors of head (self included)
        who are not in stop.
        """
        if head is None:
            head = self.badrev
        return self.__ancestors_and_nb_ancestors(head, stop)[0]

    def __ancestors_and_nb_ancestors(self, head, stop=None):
        """
        if stop is None then ancestors of goodrevs are used as
        lower limit.

        returns (anc, n_child) where anc is the set of the ancestors of head
        and n_child is a dictionary with the following mapping:
        node -> number of ancestors (self included)
        """
        cl = self.repo.changelog
        if not stop:
            stop = sets.Set([])
        for i in xrange(len(self.goodrevs)-1, -1, -1):
            g = self.goodrevs[i]
            if g in stop:
                continue
            stop.update(cl.reachable(g))
        def num_children(a):
            """
            returns a dictionary with the following mapping
            node -> [number of children, empty set]
            """
            d = {a: [0, sets.Set([])]}
            for i in xrange(cl.rev(a)+1):
                n = cl.node(i)
                if not d.has_key(n):
                    d[n] = [0, sets.Set([])]
                parents = [p for p in cl.parents(n) if p != hg.nullid]
                for p in parents:
                    d[p][0] += 1
            return d

        if head in stop:
-            raise util.Abort(_("Unconsistent state, %s:%s is good and bad")
+            raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
                             % (cl.rev(head), hg.short(head)))
        n_child = num_children(head)
        for i in xrange(cl.rev(head)+1):
            n = cl.node(i)
            parents = [p for p in cl.parents(n) if p != hg.nullid]
            for p in parents:
                n_child[p][0] -= 1
                if not n in stop:
                    n_child[n][1].union_update(n_child[p][1])
                if n_child[p][0] == 0:
                    n_child[p] = len(n_child[p][1])
            if not n in stop:
                n_child[n][1].add(n)
                if n_child[n][0] == 0:
                    if n == head:
                        anc = n_child[n][1]
                    n_child[n] = len(n_child[n][1])
        return anc, n_child

    def next(self):
        if not self.badrev:
            raise util.Abort(_("You should give at least one bad revision"))
        if not self.goodrevs:
            self.ui.warn(_("No good revision given\n"))
            self.ui.warn(_("Marking the first revision as good\n"))
        ancestors, num_ancestors = self.__ancestors_and_nb_ancestors(
                                        self.badrev)
        tot = len(ancestors)
        if tot == 1:
            if ancestors.pop() != self.badrev:
                raise util.Abort(_("Could not find the first bad revision"))
            self.ui.write(_("The first bad revision is:\n"))
            displayer = cmdutil.show_changeset(self.ui, self.repo, {})
            displayer.show(changenode=self.badrev)
            return None
        best_rev = None
        best_len = -1
        for n in ancestors:
            l = num_ancestors[n]
            l = min(l, tot - l)
            if l > best_len:
                best_len = l
                best_rev = n
        assert best_rev is not None
        nb_tests = 0
        q, r = divmod(tot, 2)
        while q:
            nb_tests += 1
            q, r = divmod(q, 2)
        msg = _("Testing changeset %s:%s (%s changesets remaining, "
                "~%s tests)\n") % (self.repo.changelog.rev(best_rev),
                                   hg.short(best_rev), tot, nb_tests)
        self.ui.write(msg)
        return best_rev

    def autonext(self):
        """find and update to the next revision to test"""
        check_clean(self.ui, self.repo)
        rev = self.next()
        if rev is not None:
            return hg.clean(self.repo, rev)

    def good(self, rev):
        self.goodrevs.append(rev)

    def autogood(self, rev=None):
        """mark revision as good and update to the next revision to test"""
        check_clean(self.ui, self.repo)
        rev = lookup_rev(self.ui, self.repo, rev)
        self.good(rev)
        if self.badrev:
            return self.autonext()

    def bad(self, rev):
        self.badrev = rev

    def autobad(self, rev=None):
        """mark revision as bad and update to the next revision to test"""
        check_clean(self.ui, self.repo)
        rev = lookup_rev(self.ui, self.repo, rev)
        self.bad(rev)
        if self.goodrevs:
            self.autonext()

# should we put it in the class ?
def test(ui, repo, rev):
    """test the bisection code"""
    b = bisect(ui, repo)
    rev = repo.lookup(rev)
    ui.write("testing with rev %s\n" % hg.hex(rev))
    anc = b.ancestors()
    while len(anc) > 1:
        if not rev in anc:
            ui.warn("failure while bisecting\n")
            sys.exit(1)
        ui.write("it worked :)\n")
        new_rev = b.next()
        ui.write("choosing if good or bad\n")
        if rev in b.ancestors(head=new_rev):
            b.bad(new_rev)
            ui.write("it is bad\n")
        else:
            b.good(new_rev)
            ui.write("it is good\n")
        anc = b.ancestors()
        #repo.update(new_rev, force=True)
    for v in anc:
        if v != rev:
            ui.warn("failed to find cset! :(\n")
            return 1
    ui.write("Found bad cset: %s\n" % hg.hex(b.badrev))
    ui.write("Everything is ok :)\n")
    return 0

def bisect_run(ui, repo, cmd=None, *args):
    """Dichotomic search in the DAG of changesets

    This extension helps to find changesets which cause problems.
    To use, mark the earliest changeset you know introduces the problem
    as bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision for
    testing. Once you have performed tests, mark the working directory
    as bad or good and bisect will either update to another candidate
    changeset or announce that it has found the bad revision.

    Note: bisect expects bad revisions to be descendants of good revisions.
    If you are looking for the point at which a problem was fixed, then make
    the problem-free state "bad" and the problematic state "good."

    For subcommands see "hg bisect help\"
    """
    def help_(cmd=None, *args):
        """show help for a given bisect subcommand or all subcommands"""
        cmdtable = bisectcmdtable
        if cmd:
            doc = cmdtable[cmd][0].__doc__
            synopsis = cmdtable[cmd][2]
            ui.write(synopsis + "\n")
            ui.write("\n" + doc + "\n")
            return
        ui.write(_("list of subcommands for the bisect extension\n\n"))
        cmds = cmdtable.keys()
        cmds.sort()
        m = max([len(c) for c in cmds])
        for cmd in cmds:
            doc = cmdtable[cmd][0].__doc__.splitlines(0)[0].rstrip()
            ui.write(" %-*s %s\n" % (m, cmd, doc))

    b = bisect(ui, repo)
    bisectcmdtable = {
        "init": (b.init, 0, _("hg bisect init")),
        "bad": (b.autobad, 1, _("hg bisect bad [<rev>]")),
        "good": (b.autogood, 1, _("hg bisect good [<rev>]")),
        "next": (b.autonext, 0, _("hg bisect next")),
        "reset": (b.reset, 0, _("hg bisect reset")),
        "help": (help_, 1, _("hg bisect help [<subcommand>]")),
    }

    if not bisectcmdtable.has_key(cmd):
        ui.warn(_("bisect: Unknown sub-command\n"))
        return help_()
    if len(args) > bisectcmdtable[cmd][1]:
        ui.warn(_("bisect: Too many arguments\n"))
        return help_()
    try:
        return bisectcmdtable[cmd][0](*args)
    finally:
        b.write()

cmdtable = {
    "bisect": (bisect_run, [], _("hg bisect [help|init|reset|next|good|bad]")),
    #"bisect-test": (test, [], "hg bisect-test rev"),
}
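
The search strategy in this file is worth calling out: next() scores every candidate ancestor by min(l, tot - l), the number of changesets eliminated whichever way the test goes, and picks the candidate maximizing that worst-case gain; the divmod loop then estimates the remaining number of tests as roughly log2 of the candidate count. A minimal standalone sketch of both pieces (the function names and toy data are illustrative, not part of the extension):

def pick_midpoint(num_ancestors, tot):
    # num_ancestors: node -> number of ancestors (self included),
    # as produced by bisect.num_ancestors() above
    best_rev, best_len = None, -1
    for node, l in num_ancestors.items():
        l = min(l, tot - l)   # worst-case number of changesets eliminated
        if l > best_len:
            best_len, best_rev = l, node
    return best_rev

def expected_tests(tot):
    # mirrors the divmod loop in next(): floor(log2(tot)) tests remain
    nb_tests, q = 0, tot // 2
    while q:
        nb_tests += 1
        q = q // 2
    return nb_tests

# on a linear history a-b-c-d-e with e bad, a middle changeset wins;
# both b and c split the five candidates as evenly as possible
counts = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
assert pick_midpoint(counts, len(counts)) in ('b', 'c')
assert expected_tests(len(counts)) == 2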
@@ -1,72 +1,73 @@
# hgweb/wsgicgi.py - CGI->WSGI translator
#
# Copyright 2006 Eric Hopper <hopper@omnifarious.org>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
#
# This was originally copied from the public domain code at
# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side

import os, sys
from mercurial import util

def launch(application):
+    util.set_binary(sys.stdin)
    util.set_binary(sys.stdout)

    environ = dict(os.environ.items())
    environ['wsgi.input'] = sys.stdin
    environ['wsgi.errors'] = sys.stderr
    environ['wsgi.version'] = (1, 0)
    environ['wsgi.multithread'] = False
    environ['wsgi.multiprocess'] = True
    environ['wsgi.run_once'] = True

    if environ.get('HTTPS','off') in ('on','1'):
        environ['wsgi.url_scheme'] = 'https'
    else:
        environ['wsgi.url_scheme'] = 'http'

    headers_set = []
    headers_sent = []
    out = sys.stdout

    def write(data):
        if not headers_set:
            raise AssertionError("write() before start_response()")

        elif not headers_sent:
            # Before the first output, send the stored headers
            status, response_headers = headers_sent[:] = headers_set
            out.write('Status: %s\r\n' % status)
            for header in response_headers:
                out.write('%s: %s\r\n' % header)
            out.write('\r\n')

        out.write(data)
        out.flush()

    def start_response(status, response_headers, exc_info=None):
        if exc_info:
            try:
                if headers_sent:
                    # Re-raise original exception if headers sent
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                exc_info = None   # avoid dangling circular ref
        elif headers_set:
            raise AssertionError("Headers already set!")

        headers_set[:] = [status, response_headers]
        return write

    result = application(environ, start_response)
    try:
        for data in result:
            if data:   # don't send headers until body appears
                write(data)
        if not headers_sent:
            write('')   # send headers now if body was empty
    finally:
        if hasattr(result,'close'):
            result.close()
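
launch() is essentially the server-gateway example from PEP 333 with the binary-mode fix for the standard streams: it builds a CGI environ, buffers the status line and headers, and flushes them just before the first body chunk. A hedged sketch of a CGI script driving it; the hello application is hypothetical (real hgweb scripts pass the hgweb application instead):

#!/usr/bin/env python
from mercurial.hgweb import wsgicgi

def hello_app(environ, start_response):
    # minimal WSGI application: one status, one header, one body chunk
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['hello via CGI->WSGI\n']

wsgicgi.launch(hello_app)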
@@ -1,1950 +1,1950 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context
import re, lock, transaction, tempfile, stat, mdiff, errno, ui
import os, revlog, time, util

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')
    supported = ('revlogv1', 'store')

    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        self.changelog = changelog.changelog(self.sopener)
        self.sopener.defversion = self.changelog.version
        self.manifest = manifest.manifest(self.sopener)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r

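    # A hedged sketch of a python hook matching the callhook() contract
    # above (the hgrc entry and module name are hypothetical): hooks are
    # invoked as obj(ui=..., repo=..., hooktype=<name>, **args), and a true
    # return value means "hook failed".
    #
    #   [hooks]
    #   pretag.checkname = python:myhooks.forbid_spaces
    #
    #   def forbid_spaces(ui, repo, hooktype, node=None, tag=None, **kwargs):
    #       if tag and ' ' in tag:
    #           ui.warn('tag names may not contain spaces\n')
    #           return True   # with throw=True this aborts the operation
    #       return False
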
    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}

        def readtags(lines, fn):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k,nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if bn != an and an in bh and \
                   (bn not in ah or len(bh) > len(ah)):
                    an = bn
-                    ah.append([n for n in bh if n not in ah])
+                    ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

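    # A hedged sketch of the filter mechanism implemented by _filter()
    # above (the pattern and command are hypothetical).  Given hgrc
    # entries like
    #
    #   [encode]
    #   *.txt = tr a-z A-Z
    #
    # wread() pipes the data of any matching file through the command via
    # util.filter() before it is used, and wwrite() applies the [decode]
    # counterpart on the way back to the working directory.
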
527 def wread(self, filename):
527 def wread(self, filename):
528 if self._link(filename):
528 if self._link(filename):
529 data = os.readlink(self.wjoin(filename))
529 data = os.readlink(self.wjoin(filename))
530 else:
530 else:
531 data = self.wopener(filename, 'r').read()
531 data = self.wopener(filename, 'r').read()
532 return self._filter("encode", filename, data)
532 return self._filter("encode", filename, data)
533
533
534 def wwrite(self, filename, data, flags):
534 def wwrite(self, filename, data, flags):
535 data = self._filter("decode", filename, data)
535 data = self._filter("decode", filename, data)
536 if "l" in flags:
536 if "l" in flags:
537 f = self.wjoin(filename)
537 f = self.wjoin(filename)
538 try:
538 try:
539 os.unlink(f)
539 os.unlink(f)
540 except OSError:
540 except OSError:
541 pass
541 pass
542 d = os.path.dirname(f)
542 d = os.path.dirname(f)
543 if not os.path.exists(d):
543 if not os.path.exists(d):
544 os.makedirs(d)
544 os.makedirs(d)
545 os.symlink(data, f)
545 os.symlink(data, f)
546 else:
546 else:
547 try:
547 try:
548 if self._link(filename):
548 if self._link(filename):
549 os.unlink(self.wjoin(filename))
549 os.unlink(self.wjoin(filename))
550 except OSError:
550 except OSError:
551 pass
551 pass
552 self.wopener(filename, 'w').write(data)
552 self.wopener(filename, 'w').write(data)
553 util.set_exec(self.wjoin(filename), "x" in flags)
553 util.set_exec(self.wjoin(filename), "x" in flags)
554
554
    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self.transhandle
        if tr is not None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr

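    # Added summary: an open transaction journals into "journal" and
    # "journal.dirstate"; aftertrans() renames these to "undo" and
    # "undo.dirstate" on close, which is what rollback() consumes, while
    # an interrupted transaction leaves "journal" behind for recover().
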
    def recover(self):
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None, lock=None):
        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.reload()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

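    # Added summary: lock() guards the store and reloads the changelog
    # and manifest on acquisition; wlock() guards the working directory
    # and writes the dirstate back via its releasefn when released.
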
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

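    # Illustrative example (added; values are hypothetical): after
    # "hg copy foo bar", committing bar records meta = {"copy": "foo",
    # "copyrev": <hex filenode of foo>} on bar's new filelog revision,
    # so the copy source behaves as an extra parent during later merges,
    # exactly as the diagram above describes.
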
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

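    # Illustrative usage (added; names and values are hypothetical):
    #
    #   node = repo.commit(text="fix frobnication",
    #                      user="you <you@example.com>")
    #
    # With files=None and the default match, the dirstate decides what
    # is committed; rawcommit() instead trusts the caller's file list
    # and parents, which is what makes it "raw".
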
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

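    # Illustrative usage (added, hypothetical loop):
    #
    #   for src, fn in repo.walk():
    #       if src == 'f':
    #           print fn    # present in the working directory
    #
    # Passing node=... walks a changeset's manifest instead, in which
    # case matched entries are reported with src 'm'.
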
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

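    # Illustrative usage (added, hypothetical call):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_clean=True)
    #
    # With no node arguments this compares the working directory to its
    # first parent; ignored and clean stay empty unless the matching
    # list_* flag is passed.
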
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            islink = os.path.islink(p)
            size = os.lstat(p).st_size
            if size > 10000000:
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                             % (f, f))
            if not islink and not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not islink and not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn(_("%s not removed!\n") % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

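    # Added explanatory comment for between() below: for each
    # (top, bottom) pair it walks first parents from top towards bottom
    # and keeps only the nodes at exponentially growing distances
    # (1, 2, 4, 8, ...); these sparse samples let the discovery code in
    # findincoming() binary-search an unknown range with few round trips.
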
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

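    # Added summary: discovery is a conversation with the remote --
    # remote.heads() seeds the unknown set, remote.branches() expands
    # heads into linear segments, and remote.between() bisects any
    # segment whose base is already known locally, until fetch holds the
    # roots of everything missing from this repository.
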
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1435 def changegroupsubset(self, bases, heads, source):
1435 def changegroupsubset(self, bases, heads, source):
1436 """This function generates a changegroup consisting of all the nodes
1436 """This function generates a changegroup consisting of all the nodes
1437 that are descendents of any of the bases, and ancestors of any of
1437 that are descendents of any of the bases, and ancestors of any of
1438 the heads.
1438 the heads.
1439
1439
1440 It is fairly complex as determining which filenodes and which
1440 It is fairly complex as determining which filenodes and which
1441 manifest nodes need to be included for the changeset to be complete
1441 manifest nodes need to be included for the changeset to be complete
1442 is non-trivial.
1442 is non-trivial.
1443
1443
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1445 the changegroup a particular filenode or manifestnode belongs to."""
1445 the changegroup a particular filenode or manifestnode belongs to."""
1446
1446
1447 self.hook('preoutgoing', throw=True, source=source)
1447 self.hook('preoutgoing', throw=True, source=source)
1448
1448
1449 # Set up some initial variables
1449 # Set up some initial variables
1450 # Make it easy to refer to self.changelog
1450 # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

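The comparator factory above can be exercised on its own. A minimal sketch, not part of the original source: FakeRevlog and its node-to-rev mapping are invented stand-ins for a real revlog, and the sort uses Python 2's cmp-style interface.

def cmp_by_rev_func(revlog):                      # copied from above
    def cmp_by_rev(a, b):
        return cmp(revlog.rev(a), revlog.rev(b))
    return cmp_by_rev

class FakeRevlog(object):
    def __init__(self, revs):
        self.revs = revs                          # node -> revision number
    def rev(self, node):
        return self.revs[node]

nodes = ['n2', 'n0', 'n1']
nodes.sort(cmp_by_rev_func(FakeRevlog({'n0': 0, 'n1': 1, 'n2': 2})))
assert nodes == ['n0', 'n1', 'n2']                # revision (topological) order
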
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

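To see the pruning idea concretely, here is a small self-contained sketch; ToyLog and its linear three-node history are invented. Marking the tip as present should drop it and all of its ancestors from the missing set.

nullid = '\0' * 20                                 # stand-in for node.nullid

def cmp_by_rev_func(revlog):                       # as defined above
    def cmp_by_rev(a, b):
        return cmp(revlog.rev(a), revlog.rev(b))
    return cmp_by_rev

def prune_parents(revlog, hasset, msngset):        # as defined above
    haslst = hasset.keys()
    haslst.sort(cmp_by_rev_func(revlog))
    for node in haslst:
        parentlst = [p for p in revlog.parents(node) if p != nullid]
        while parentlst:
            n = parentlst.pop()
            if n not in hasset:
                hasset[n] = 1
                p = [p for p in revlog.parents(n) if p != nullid]
                parentlst.extend(p)
    for n in hasset:
        msngset.pop(n, None)

class ToyLog(object):
    # an invented linear history: n0 <- n1 <- n2
    revs = {'n0': 0, 'n1': 1, 'n2': 2}
    pars = {'n0': (nullid, nullid), 'n1': ('n0', nullid), 'n2': ('n1', nullid)}
    def rev(self, node): return self.revs[node]
    def parents(self, node): return self.pars[node]

missing = {'n0': 1, 'n1': 1, 'n2': 1}
prune_parents(ToyLog(), {'n2': 1}, missing)        # recipient has the tip...
assert missing == {}                               # ...so nothing is missing
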
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode that referenced each
            # manifest, so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

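The collector is a closure over shared dictionaries, invoked once per outgoing changeset. A rough sketch of that calling convention follows; FakeChangelog and its entry layout are invented, and only the use of c[0] as the manifest node and c[3] as the file list matches the code above.

changedfileset = {}
msng_mnfst_set = {}

class FakeChangelog(object):
    # read() yields a tuple whose fields 0 and 3 are all the collector uses
    entries = {'cs1': ('m1', None, None, ['a.txt']),
               'cs2': ('m1', None, None, ['a.txt', 'b.txt'])}
    def read(self, clnode):
        return self.entries[clnode]

cl = FakeChangelog()

def manifest_and_file_collector(changedfileset):   # as defined above
    def collect_manifests_and_files(clnode):
        c = cl.read(clnode)
        for f in c[3]:
            changedfileset.setdefault(f, f)
        msng_mnfst_set.setdefault(c[0], clnode)
    return collect_manifests_and_files

collect = manifest_and_file_collector(changedfileset)
for clnode in ['cs1', 'cs2']:                      # group() would do this
    collect(clnode)
assert sorted(changedfileset) == ['a.txt', 'b.txt']
assert msng_mnfst_set == {'m1': 'cs1'}             # first changenode "owns" m1
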
        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

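Each line of a manifest delta pairs a file name with a 40-character hex node, separated by a NUL byte; the split above undoes that. A standalone illustration follows, where the file name and hash are made up and bin() is approximated with unhexlify.

import binascii

def bin(s):                                        # stand-in for node.bin
    return binascii.unhexlify(s)

dline = 'src/foo.py\x00' + 'abcdef0123456789abcdef0123456789abcdef01'
f, fnode = dline.split('\0')
fnode = bin(fnode[:40])                            # 40 hex chars -> 20 bytes
assert f == 'src/foo.py' and len(fnode) == 20
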
        # We have a list of filenodes we think we need for a file, so let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

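The genchunk/closechunk calls above suggest the wire framing: each chunk in the generated stream is length-prefixed, and a zero-length chunk terminates a group. A sketch of that framing, assuming a 4-byte big-endian length that counts the prefix itself (the payload string is invented):

import struct

def genchunk_sketch(data):
    return struct.pack(">l", len(data) + 4) + data  # length includes prefix

def closechunk_sketch():
    return struct.pack(">l", 0)                     # zero length ends a group

stream = genchunk_sketch('hello') + closechunk_sketch()
l = struct.unpack(">l", stream[:4])[0]
assert stream[4:l] == 'hello'
assert struct.unpack(">l", stream[l:l + 4])[0] == 0
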
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

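A small helper, written here purely to restate the return convention from the docstring above (the function name is invented):

def describe_result(ret):
    # 0 = no-op, 1 = same head count,
    # 2..n = (ret - 1) heads added, -2..-n = (-ret - 1) heads removed
    if ret == 0:
        return "nothing changed or no source"
    if ret == 1:
        return "head count unchanged"
    if ret > 1:
        return "%d head(s) added" % (ret - 1)
    return "%d head(s) removed" % (-ret - 1)

assert describe_result(3) == "2 head(s) added"
assert describe_result(-2) == "1 head(s) removed"
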
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

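The reads above imply a simple line-oriented preamble followed by NUL-delimited per-file headers and raw file bytes. A self-contained sketch of parsing such a stream, with a fabricated file name, sizes, and payload:

from StringIO import StringIO

payload = 'x' * 10
stream = StringIO('0\n'                      # status: OK
                  '1 10\n'                   # total files, total bytes
                  'data/foo.i\x0010\n'       # per-file: name NUL size
                  + payload)                 # followed by the raw bytes

assert int(stream.readline()) == 0
total_files, total_bytes = map(int, stream.readline().split(' ', 1))
name, size = stream.readline().split('\0', 1)
assert (name, int(size)) == ('data/foo.i', 10)
assert stream.read(int(size)) == payload
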
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

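A usage sketch for the deferred-rename pattern: build the callable while the transaction is open, run it afterwards. The file names are throwaway, and os.rename substitutes for util.rename.

import os

def aftertrans_sketch(files):                # same shape as aftertrans above
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            os.rename(src, dest)             # util.rename in the real code
    return a

open('journal.tmp', 'w').close()
cleanup = aftertrans_sketch([('journal.tmp', 'undo.tmp')])
cleanup()                                    # the queued rename runs here
assert os.path.exists('undo.tmp')
os.unlink('undo.tmp')
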
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,1296 +1,1297 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import _
14 from i18n import _
15 import binascii, changegroup, errno, ancestor, mdiff, os
15 import binascii, changegroup, errno, ancestor, mdiff, os
16 import sha, struct, util, zlib
16 import sha, struct, util, zlib
17
17
18 # revlog version strings
18 # revlog version strings
19 REVLOGV0 = 0
19 REVLOGV0 = 0
20 REVLOGNG = 1
20 REVLOGNG = 1
21
21
22 # revlog flags
22 # revlog flags
23 REVLOGNGINLINEDATA = (1 << 16)
23 REVLOGNGINLINEDATA = (1 << 16)
24 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
24 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
25
25
26 REVLOG_DEFAULT_FORMAT = REVLOGNG
26 REVLOG_DEFAULT_FORMAT = REVLOGNG
27 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
27 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
28
28
29 def hash(text, p1, p2):
29 def hash(text, p1, p2):
30 """generate a hash from the given text and its parent hashes
30 """generate a hash from the given text and its parent hashes
31
31
32 This hash combines both the current file contents and its history
32 This hash combines both the current file contents and its history
33 in a manner that makes it easy to distinguish nodes with the same
33 in a manner that makes it easy to distinguish nodes with the same
34 content in the revision graph.
34 content in the revision graph.
35 """
35 """
36 l = [p1, p2]
36 l = [p1, p2]
37 l.sort()
37 l.sort()
38 s = sha.new(l[0])
38 s = sha.new(l[0])
39 s.update(l[1])
39 s.update(l[1])
40 s.update(text)
40 s.update(text)
41 return s.digest()
41 return s.digest()
42
42
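A quick check of two properties of this scheme, in a standalone copy (hash_sketch mirrors hash() above; the node values are invented): sorting the parents first makes the nodeid independent of parent order, and the digest is always 20 bytes.

import sha

def hash_sketch(text, p1, p2):               # mirrors hash() above
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

nullid = '\0' * 20
p = 'A' * 20
assert hash_sketch('data', nullid, p) == hash_sketch('data', p, nullid)
assert len(hash_sketch('data', nullid, nullid)) == 20
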
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return ("", text)
    if len(text) < 44:
        if text[0] == '\0': return ("", text)
        return ('u', text)
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

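A quick property check, assuming compress and decompress above are in scope: every branch of compress (empty input, short literal, compressible text, data starting with a NUL byte) yields something decompress inverts.

for text in ('', 'short', 'x' * 1000, '\0binary'):
    head, body = compress(text)
    assert decompress(head + body) == text
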
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
# index ng:
# 6 bytes offset
# 2 bytes flags
# 4 bytes compressed length
# 4 bytes uncompressed length
# 4 bytes: base rev
# 4 bytes link rev
# 4 bytes parent 1 rev
# 4 bytes parent 2 rev
# 32 bytes: nodeid (20 bytes, plus 12 bytes of padding)
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

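The struct strings can be sanity-checked directly: the v0 record is 76 bytes with its nodeid at offset 56, while the ng record pads to a round 64 bytes with the nodeid at offset 32, matching v0shaoffset and ngshaoffset above.

import struct

assert struct.calcsize(">4l20s20s20s") == 76    # v0: 4 longs + three hashes
assert 4 * 4 + 2 * 20 == 56                     # nodeid offset in a v0 record
assert struct.calcsize(">Qiiiiii20s12x") == 64  # ng: padded to 64 bytes
assert 8 + 6 * 4 == 32                          # nodeid offset in an ng record
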
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if win32 extensions are not
    # available. it keeps the file handle open, which makes it impossible
    # to break hardlinks on local cloned repos.
    safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
                                      hasattr(util, 'win32api'))

    def __init__(self, dataf, size, indexformat, shaoffset):
        self.dataf = dataf
        self.format = indexformat
        self.s = struct.calcsize(indexformat)
        self.indexformat = indexformat
        self.datasize = size
        self.l = size/self.s
        self.index = [None] * self.l
        self.map = {nullid: nullrev}
        self.allmap = 0
        self.all = 0
        self.mapfind_count = 0
        self.shaoffset = shaoffset

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate.  This requires loading the entire index,
        which is fairly slow.  loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap: return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                n = data[off + self.shaoffset:off + self.shaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        if self.all: return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        # lazyindex supports __delitem__
        if lend > len(self.index) - i:
            lend = len(self.index) - i
        for x in xrange(lend):
            if self.index[i + x] == None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[self.shaoffset:self.shaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap: return None

        # hg log will cause many many searches for the manifest
        # nodes.  After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        while self.index[last] != None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.rfind(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + self.shaoffset:off + self.shaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None

    def loadindex(self, i=None, end=None):
        if self.all: return
        all = False
        if i == None:
            blockstart = 0
            blocksize = (512 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~63) * self.s
                blocksize = self.s * 64
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all: self.all = True

class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        ret = self.p.index[pos] or self.load(pos)
        if isinstance(ret, str):
            ret = struct.unpack(self.p.indexformat, ret)
        return ret
    def __setitem__(self, pos, item):
        self.p.index[pos] = item
    def __delitem__(self, pos):
        del self.p.index[pos]
    def append(self, e):
        self.p.index.append(e)

class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        n = self.p.findnode(key)
        if n == None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = struct.unpack(self.p.indexformat, ret)
            yield ret[-1]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

class RevlogError(Exception): pass
class LookupError(RevlogError): pass

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on.  Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk.  To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data.  This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener

        self.indexstat = None
        self.cache = None
        self.chunkcache = None
        self.defversion = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            self.defversion = opener.defversion
        if self.defversion & REVLOGNG:
            self.defversion |= REVLOGNGINLINEDATA
        self.load()

    def load(self):
        v = self.defversion
        try:
            f = self.opener(self.indexfile)
            i = f.read(4)
            f.seek(0)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""
        else:
            try:
                st = util.fstat(f)
            except AttributeError, inst:
                st = None
            else:
                oldst = self.indexstat
                if (oldst and st.st_dev == oldst.st_dev
                    and st.st_ino == oldst.st_ino
                    and st.st_mtime == oldst.st_mtime
-                   and st.st_ctime == oldst.st_ctime):
+                   and st.st_ctime == oldst.st_ctime
+                   and st.st_size == oldst.st_size):
                    return
                self.indexstat = st
        if len(i) > 0:
            v = struct.unpack(versionformat, i)[0]
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_("index %s unknown flags %#04x for format v0")
                                  % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG:
            if flags & ~REVLOGNGINLINEDATA:
                raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                                  % (self.indexfile, flags >> 16))
        else:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))
        self.version = v
        if v == REVLOGV0:
            self.indexformat = indexformatv0
            shaoffset = v0shaoffset
        else:
            self.indexformat = indexformatng
            shaoffset = ngshaoffset

        if i:
            if (lazyparser.safe_to_use and not self.inlinedata() and
                st and st.st_size > 10000):
                # big index, let's parse it on demand
                parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
                self.index = lazyindex(parser)
                self.nodemap = lazymap(parser)
            else:
                self.parseindex(f, st)
            if self.version != REVLOGV0:
                e = list(self.index[0])
                type = self.ngtype(e[0])
                e[0] = self.offset_type(0, type)
                self.index[0] = e
        else:
            self.nodemap = {nullid: nullrev}
            self.index = []

395 def parseindex(self, fp, st):
396 def parseindex(self, fp, st):
396 s = struct.calcsize(self.indexformat)
397 s = struct.calcsize(self.indexformat)
397 self.index = []
398 self.index = []
398 self.nodemap = {nullid: nullrev}
399 self.nodemap = {nullid: nullrev}
399 inline = self.inlinedata()
400 inline = self.inlinedata()
400 n = 0
401 n = 0
401 leftover = None
402 leftover = None
402 while True:
403 while True:
403 if st:
404 if st:
404 data = fp.read(65536)
405 data = fp.read(65536)
405 else:
406 else:
406 # hack for httprangereader, it doesn't do partial reads well
407 # hack for httprangereader, it doesn't do partial reads well
407 data = fp.read()
408 data = fp.read()
408 if not data:
409 if not data:
409 break
410 break
410 if n == 0 and self.inlinedata():
411 if n == 0 and self.inlinedata():
411 # cache the first chunk
412 # cache the first chunk
412 self.chunkcache = (0, data)
413 self.chunkcache = (0, data)
413 if leftover:
414 if leftover:
414 data = leftover + data
415 data = leftover + data
415 leftover = None
416 leftover = None
416 off = 0
417 off = 0
417 l = len(data)
418 l = len(data)
418 while off < l:
419 while off < l:
419 if l - off < s:
420 if l - off < s:
420 leftover = data[off:]
421 leftover = data[off:]
421 break
422 break
422 cur = data[off:off + s]
423 cur = data[off:off + s]
423 off += s
424 off += s
424 e = struct.unpack(self.indexformat, cur)
425 e = struct.unpack(self.indexformat, cur)
425 self.index.append(e)
426 self.index.append(e)
426 self.nodemap[e[-1]] = n
427 self.nodemap[e[-1]] = n
427 n += 1
428 n += 1
428 if inline:
429 if inline:
429 if e[1] < 0:
430 if e[1] < 0:
430 break
431 break
431 off += e[1]
432 off += e[1]
432 if off > l:
433 if off > l:
433 # some things don't seek well, just read it
434 # some things don't seek well, just read it
434 fp.read(off - l)
435 fp.read(off - l)
435 break
436 break
436 if not st:
437 if not st:
437 break
438 break
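# A note on the loop above (annotation, not part of the original
# source): the index is consumed in 64KB reads, and any partial entry
# left at the end of a read is carried over in `leftover` and glued
# onto the next block.  For inline revlogs the loop additionally skips
# e[1] bytes of interleaved revision data after each entry, which is
# why `off` can temporarily run past the buffer and force an extra
# fp.read() to resynchronise.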
438
439
439
440
440 def ngoffset(self, q):
441 def ngoffset(self, q):
441 if q & 0xFFFF:
442 if q & 0xFFFF:
442 raise RevlogError(_('%s: incompatible revision flag %x') %
443 raise RevlogError(_('%s: incompatible revision flag %x') %
443 (self.indexfile, q))
444 (self.indexfile, q))
444 return long(q >> 16)
445 return long(q >> 16)
445
446
446 def ngtype(self, q):
447 def ngtype(self, q):
447 return int(q & 0xFFFF)
448 return int(q & 0xFFFF)
448
449
449 def offset_type(self, offset, type):
450 def offset_type(self, offset, type):
450 return long(long(offset) << 16 | type)
451 return long(long(offset) << 16 | type)
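# Annotation (illustrative, not from the original source): revlogng
# packs a wide file offset into the upper bits and a 16-bit type/flags
# field into the low bits of one integer, so the three helpers above
# are inverses of each other:
#
#   q = self.offset_type(12345, 0)    # (12345 << 16) | 0
#   assert self.ngoffset(q) == 12345  # upper bits: the offset
#   assert self.ngtype(q) == 0        # low 16 bits: the type
#
# ngoffset() deliberately refuses values whose low 16 bits are set,
# since that would mean an unknown per-revision flag.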
451
452
452 def loadindex(self, start, end):
453 def loadindex(self, start, end):
453 """load a block of indexes all at once from the lazy parser"""
454 """load a block of indexes all at once from the lazy parser"""
454 if isinstance(self.index, lazyindex):
455 if isinstance(self.index, lazyindex):
455 self.index.p.loadindex(start, end)
456 self.index.p.loadindex(start, end)
456
457
457 def loadindexmap(self):
458 def loadindexmap(self):
458 """loads both the map and the index from the lazy parser"""
459 """loads both the map and the index from the lazy parser"""
459 if isinstance(self.index, lazyindex):
460 if isinstance(self.index, lazyindex):
460 p = self.index.p
461 p = self.index.p
461 p.loadindex()
462 p.loadindex()
462 self.nodemap = p.map
463 self.nodemap = p.map
463
464
464 def loadmap(self):
465 def loadmap(self):
465 """loads the map from the lazy parser"""
466 """loads the map from the lazy parser"""
466 if isinstance(self.nodemap, lazymap):
467 if isinstance(self.nodemap, lazymap):
467 self.nodemap.p.loadmap()
468 self.nodemap.p.loadmap()
468 self.nodemap = self.nodemap.p.map
469 self.nodemap = self.nodemap.p.map
469
470
470 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
471 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
471 def tip(self): return self.node(len(self.index) - 1)
472 def tip(self): return self.node(len(self.index) - 1)
472 def count(self): return len(self.index)
473 def count(self): return len(self.index)
473 def node(self, rev):
474 def node(self, rev):
474 return rev == nullrev and nullid or self.index[rev][-1]
475 return rev == nullrev and nullid or self.index[rev][-1]
475 def rev(self, node):
476 def rev(self, node):
476 try:
477 try:
477 return self.nodemap[node]
478 return self.nodemap[node]
478 except KeyError:
479 except KeyError:
479 raise LookupError(_('%s: no node %s') % (self.indexfile, hex(node)))
480 raise LookupError(_('%s: no node %s') % (self.indexfile, hex(node)))
480 def linkrev(self, node):
481 def linkrev(self, node):
481 return (node == nullid) and nullrev or self.index[self.rev(node)][-4]
482 return (node == nullid) and nullrev or self.index[self.rev(node)][-4]
482 def parents(self, node):
483 def parents(self, node):
483 if node == nullid: return (nullid, nullid)
484 if node == nullid: return (nullid, nullid)
484 r = self.rev(node)
485 r = self.rev(node)
485 d = self.index[r][-3:-1]
486 d = self.index[r][-3:-1]
486 if self.version == REVLOGV0:
487 if self.version == REVLOGV0:
487 return d
488 return d
488 return (self.node(d[0]), self.node(d[1]))
489 return (self.node(d[0]), self.node(d[1]))
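# Annotation on the negative indexing above (inferred from the entries
# built in _addrevision below): a v0 index entry is
# (offset, length, base, link, p1node, p2node, node), while a revlogng
# entry is (offset_type, length, textlen, base, link, p1rev, p2rev,
# node).  Indices like e[-1] (node), e[-4] (link) and e[-3:-1]
# (parents) are therefore valid for both formats; only the parent
# representation (nodes vs. revision numbers) differs.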
489 def parentrevs(self, rev):
490 def parentrevs(self, rev):
490 if rev == nullrev:
491 if rev == nullrev:
491 return (nullrev, nullrev)
492 return (nullrev, nullrev)
492 d = self.index[rev][-3:-1]
493 d = self.index[rev][-3:-1]
493 if self.version == REVLOGV0:
494 if self.version == REVLOGV0:
494 return (self.rev(d[0]), self.rev(d[1]))
495 return (self.rev(d[0]), self.rev(d[1]))
495 return d
496 return d
496 def start(self, rev):
497 def start(self, rev):
497 if rev == nullrev:
498 if rev == nullrev:
498 return 0
499 return 0
499 if self.version != REVLOGV0:
500 if self.version != REVLOGV0:
500 return self.ngoffset(self.index[rev][0])
501 return self.ngoffset(self.index[rev][0])
501 return self.index[rev][0]
502 return self.index[rev][0]
502
503
503 def end(self, rev): return self.start(rev) + self.length(rev)
504 def end(self, rev): return self.start(rev) + self.length(rev)
504
505
505 def size(self, rev):
506 def size(self, rev):
506 """return the length of the uncompressed text for a given revision"""
507 """return the length of the uncompressed text for a given revision"""
507 if rev == nullrev:
508 if rev == nullrev:
508 return 0
509 return 0
509 l = -1
510 l = -1
510 if self.version != REVLOGV0:
511 if self.version != REVLOGV0:
511 l = self.index[rev][2]
512 l = self.index[rev][2]
512 if l >= 0:
513 if l >= 0:
513 return l
514 return l
514
515
515 t = self.revision(self.node(rev))
516 t = self.revision(self.node(rev))
516 return len(t)
517 return len(t)
517
518
518 # alternate implementation; the advantage of this code is that it
519 # alternate implementation; the advantage of this code is that it
519 # will be faster for a single revision. But the results are not
520 # will be faster for a single revision. But the results are not
520 # cached, so finding the size of every revision will be slower.
521 # cached, so finding the size of every revision will be slower.
521 """
522 """
522 if self.cache and self.cache[1] == rev:
523 if self.cache and self.cache[1] == rev:
523 return len(self.cache[2])
524 return len(self.cache[2])
524
525
525 base = self.base(rev)
526 base = self.base(rev)
526 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
527 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
527 base = self.cache[1]
528 base = self.cache[1]
528 text = self.cache[2]
529 text = self.cache[2]
529 else:
530 else:
530 text = self.revision(self.node(base))
531 text = self.revision(self.node(base))
531
532
532 l = len(text)
533 l = len(text)
533 for x in xrange(base + 1, rev + 1):
534 for x in xrange(base + 1, rev + 1):
534 l = mdiff.patchedsize(l, self.chunk(x))
535 l = mdiff.patchedsize(l, self.chunk(x))
535 return l
536 return l
536 """
537 """
537
538
538 def length(self, rev):
539 def length(self, rev):
539 if rev == nullrev:
540 if rev == nullrev:
540 return 0
541 return 0
541 else:
542 else:
542 return self.index[rev][1]
543 return self.index[rev][1]
543 def base(self, rev):
544 def base(self, rev):
544 if (rev == nullrev):
545 if (rev == nullrev):
545 return nullrev
546 return nullrev
546 else:
547 else:
547 return self.index[rev][-5]
548 return self.index[rev][-5]
548
549
549 def reachable(self, node, stop=None):
550 def reachable(self, node, stop=None):
550 """return a hash of all nodes ancestral to a given node, including
551 """return a hash of all nodes ancestral to a given node, including
551 the node itself, stopping when stop is matched"""
552 the node itself, stopping when stop is matched"""
552 reachable = {}
553 reachable = {}
553 visit = [node]
554 visit = [node]
554 reachable[node] = 1
555 reachable[node] = 1
555 if stop:
556 if stop:
556 stopn = self.rev(stop)
557 stopn = self.rev(stop)
557 else:
558 else:
558 stopn = 0
559 stopn = 0
559 while visit:
560 while visit:
560 n = visit.pop(0)
561 n = visit.pop(0)
561 if n == stop:
562 if n == stop:
562 continue
563 continue
563 if n == nullid:
564 if n == nullid:
564 continue
565 continue
565 for p in self.parents(n):
566 for p in self.parents(n):
566 if self.rev(p) < stopn:
567 if self.rev(p) < stopn:
567 continue
568 continue
568 if p not in reachable:
569 if p not in reachable:
569 reachable[p] = 1
570 reachable[p] = 1
570 visit.append(p)
571 visit.append(p)
571 return reachable
572 return reachable
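# Usage sketch (hypothetical names; `rl` stands for a revlog instance
# and is not defined in this file):
#
#   anc = rl.reachable(tip)           # {node: 1} for tip and ancestors
#   anc = rl.reachable(tip, stop=n)   # prune the walk at node n
#
# The walk is a plain breadth-first traversal over parents, with
# parents below stop's revision number skipped early via stopn.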
572
573
573 def nodesbetween(self, roots=None, heads=None):
574 def nodesbetween(self, roots=None, heads=None):
574 """Return a tuple containing three elements. Elements 1 and 2 contain
575 """Return a tuple containing three elements. Elements 1 and 2 contain
575 a final list of bases and heads after all the unreachable ones have been
576 a final list of bases and heads after all the unreachable ones have been
576 pruned. Element 0 contains a topologically sorted list of all
577 pruned. Element 0 contains a topologically sorted list of all
577 nodes that satisfy these constraints:
578 nodes that satisfy these constraints:
579 1. All nodes must be descended from a node in roots (the nodes on
580 1. All nodes must be descended from a node in roots (the nodes on
580 roots are considered descended from themselves).
581 roots are considered descended from themselves).
581 2. All nodes must also be ancestors of a node in heads (the nodes in
582 2. All nodes must also be ancestors of a node in heads (the nodes in
582 heads are considered to be their own ancestors).
583 heads are considered to be their own ancestors).
583
584
584 If roots is unspecified, nullid is assumed as the only root.
585 If roots is unspecified, nullid is assumed as the only root.
585 If heads is unspecified, it is taken to be the output of the
586 If heads is unspecified, it is taken to be the output of the
586 heads method (i.e. a list of all nodes in the repository that
587 heads method (i.e. a list of all nodes in the repository that
587 have no children)."""
588 have no children)."""
588 nonodes = ([], [], [])
589 nonodes = ([], [], [])
589 if roots is not None:
590 if roots is not None:
590 roots = list(roots)
591 roots = list(roots)
591 if not roots:
592 if not roots:
592 return nonodes
593 return nonodes
593 lowestrev = min([self.rev(n) for n in roots])
594 lowestrev = min([self.rev(n) for n in roots])
594 else:
595 else:
595 roots = [nullid] # Everybody's a descendent of nullid
596 roots = [nullid] # Everybody's a descendent of nullid
596 lowestrev = nullrev
597 lowestrev = nullrev
597 if (lowestrev == nullrev) and (heads is None):
598 if (lowestrev == nullrev) and (heads is None):
598 # We want _all_ the nodes!
599 # We want _all_ the nodes!
599 return ([self.node(r) for r in xrange(0, self.count())],
600 return ([self.node(r) for r in xrange(0, self.count())],
600 [nullid], list(self.heads()))
601 [nullid], list(self.heads()))
601 if heads is None:
602 if heads is None:
602 # All nodes are ancestors, so the latest ancestor is the last
603 # All nodes are ancestors, so the latest ancestor is the last
603 # node.
604 # node.
604 highestrev = self.count() - 1
605 highestrev = self.count() - 1
605 # Set ancestors to None to signal that every node is an ancestor.
606 # Set ancestors to None to signal that every node is an ancestor.
606 ancestors = None
607 ancestors = None
607 # Set heads to an empty dictionary for later discovery of heads
608 # Set heads to an empty dictionary for later discovery of heads
608 heads = {}
609 heads = {}
609 else:
610 else:
610 heads = list(heads)
611 heads = list(heads)
611 if not heads:
612 if not heads:
612 return nonodes
613 return nonodes
613 ancestors = {}
614 ancestors = {}
614 # Turn heads into a dictionary so we can remove 'fake' heads.
615 # Turn heads into a dictionary so we can remove 'fake' heads.
615 # Also, later we will be using it to filter out the heads we can't
616 # Also, later we will be using it to filter out the heads we can't
616 # find from roots.
617 # find from roots.
617 heads = dict.fromkeys(heads, 0)
618 heads = dict.fromkeys(heads, 0)
618 # Start at the top and keep marking parents until we're done.
619 # Start at the top and keep marking parents until we're done.
619 nodestotag = heads.keys()
620 nodestotag = heads.keys()
620 # Remember where the top was so we can use it as a limit later.
621 # Remember where the top was so we can use it as a limit later.
621 highestrev = max([self.rev(n) for n in nodestotag])
622 highestrev = max([self.rev(n) for n in nodestotag])
622 while nodestotag:
623 while nodestotag:
623 # grab a node to tag
624 # grab a node to tag
624 n = nodestotag.pop()
625 n = nodestotag.pop()
625 # Never tag nullid
626 # Never tag nullid
626 if n == nullid:
627 if n == nullid:
627 continue
628 continue
628 # A node's revision number represents its place in a
629 # A node's revision number represents its place in a
629 # topologically sorted list of nodes.
630 # topologically sorted list of nodes.
630 r = self.rev(n)
631 r = self.rev(n)
631 if r >= lowestrev:
632 if r >= lowestrev:
632 if n not in ancestors:
633 if n not in ancestors:
633 # If we are possibly a descendent of one of the roots
634 # If we are possibly a descendent of one of the roots
634 # and we haven't already been marked as an ancestor
635 # and we haven't already been marked as an ancestor
635 ancestors[n] = 1 # Mark as ancestor
636 ancestors[n] = 1 # Mark as ancestor
636 # Add non-nullid parents to list of nodes to tag.
637 # Add non-nullid parents to list of nodes to tag.
637 nodestotag.extend([p for p in self.parents(n) if
638 nodestotag.extend([p for p in self.parents(n) if
638 p != nullid])
639 p != nullid])
639 elif n in heads: # We've seen it before, is it a fake head?
640 elif n in heads: # We've seen it before, is it a fake head?
640 # So it is, real heads should not be the ancestors of
641 # So it is, real heads should not be the ancestors of
641 # any other heads.
642 # any other heads.
642 heads.pop(n)
643 heads.pop(n)
643 if not ancestors:
644 if not ancestors:
644 return nonodes
645 return nonodes
645 # Now that we have our set of ancestors, we want to remove any
646 # Now that we have our set of ancestors, we want to remove any
646 # roots that are not ancestors.
647 # roots that are not ancestors.
647
648
648 # If one of the roots was nullid, everything is included anyway.
649 # If one of the roots was nullid, everything is included anyway.
649 if lowestrev > nullrev:
650 if lowestrev > nullrev:
650 # But, since we weren't, let's recompute the lowest rev to not
651 # But, since we weren't, let's recompute the lowest rev to not
651 # include roots that aren't ancestors.
652 # include roots that aren't ancestors.
652
653
653 # Filter out roots that aren't ancestors of heads
654 # Filter out roots that aren't ancestors of heads
654 roots = [n for n in roots if n in ancestors]
655 roots = [n for n in roots if n in ancestors]
655 # Recompute the lowest revision
656 # Recompute the lowest revision
656 if roots:
657 if roots:
657 lowestrev = min([self.rev(n) for n in roots])
658 lowestrev = min([self.rev(n) for n in roots])
658 else:
659 else:
659 # No more roots? Return empty list
660 # No more roots? Return empty list
660 return nonodes
661 return nonodes
661 else:
662 else:
662 # We are descending from nullid, and don't need to care about
663 # We are descending from nullid, and don't need to care about
663 # any other roots.
664 # any other roots.
664 lowestrev = nullrev
665 lowestrev = nullrev
665 roots = [nullid]
666 roots = [nullid]
666 # Transform our roots list into a 'set' (i.e. a dictionary where the
667 # Transform our roots list into a 'set' (i.e. a dictionary where the
667 # values don't matter).
668 # values don't matter).
668 descendents = dict.fromkeys(roots, 1)
669 descendents = dict.fromkeys(roots, 1)
669 # Also, keep the original roots so we can filter out roots that aren't
670 # Also, keep the original roots so we can filter out roots that aren't
670 # 'real' roots (i.e. are descended from other roots).
671 # 'real' roots (i.e. are descended from other roots).
671 roots = descendents.copy()
672 roots = descendents.copy()
672 # Our topologically sorted list of output nodes.
673 # Our topologically sorted list of output nodes.
673 orderedout = []
674 orderedout = []
674 # Don't start at nullid since we don't want nullid in our output list,
675 # Don't start at nullid since we don't want nullid in our output list,
675 # and if nullid shows up in descendents, empty parents will look like
676 # and if nullid shows up in descendents, empty parents will look like
676 # they're descendents.
677 # they're descendents.
677 for r in xrange(max(lowestrev, 0), highestrev + 1):
678 for r in xrange(max(lowestrev, 0), highestrev + 1):
678 n = self.node(r)
679 n = self.node(r)
679 isdescendent = False
680 isdescendent = False
680 if lowestrev == nullrev: # Everybody is a descendent of nullid
681 if lowestrev == nullrev: # Everybody is a descendent of nullid
681 isdescendent = True
682 isdescendent = True
682 elif n in descendents:
683 elif n in descendents:
683 # n is already a descendent
684 # n is already a descendent
684 isdescendent = True
685 isdescendent = True
685 # This check only needs to be done here because all the roots
686 # This check only needs to be done here because all the roots
686 # will start being marked as descendents before the loop.
687 # will start being marked as descendents before the loop.
687 if n in roots:
688 if n in roots:
688 # If n was a root, check if it's a 'real' root.
689 # If n was a root, check if it's a 'real' root.
689 p = tuple(self.parents(n))
690 p = tuple(self.parents(n))
690 # If any of its parents are descendents, it's not a root.
691 # If any of its parents are descendents, it's not a root.
691 if (p[0] in descendents) or (p[1] in descendents):
692 if (p[0] in descendents) or (p[1] in descendents):
692 roots.pop(n)
693 roots.pop(n)
693 else:
694 else:
694 p = tuple(self.parents(n))
695 p = tuple(self.parents(n))
695 # A node is a descendent if either of its parents are
696 # A node is a descendent if either of its parents are
696 # descendents. (We seeded the descendents dict with the roots
697 # descendents. (We seeded the descendents dict with the roots
697 # up there, remember?)
698 # up there, remember?)
698 if (p[0] in descendents) or (p[1] in descendents):
699 if (p[0] in descendents) or (p[1] in descendents):
699 descendents[n] = 1
700 descendents[n] = 1
700 isdescendent = True
701 isdescendent = True
701 if isdescendent and ((ancestors is None) or (n in ancestors)):
702 if isdescendent and ((ancestors is None) or (n in ancestors)):
702 # Only include nodes that are both descendents and ancestors.
703 # Only include nodes that are both descendents and ancestors.
703 orderedout.append(n)
704 orderedout.append(n)
704 if (ancestors is not None) and (n in heads):
705 if (ancestors is not None) and (n in heads):
705 # We're trying to figure out which heads are reachable
706 # We're trying to figure out which heads are reachable
706 # from roots.
707 # from roots.
707 # Mark this head as having been reached
708 # Mark this head as having been reached
708 heads[n] = 1
709 heads[n] = 1
709 elif ancestors is None:
710 elif ancestors is None:
710 # Otherwise, we're trying to discover the heads.
711 # Otherwise, we're trying to discover the heads.
711 # Assume this is a head because if it isn't, the next step
712 # Assume this is a head because if it isn't, the next step
712 # will eventually remove it.
713 # will eventually remove it.
713 heads[n] = 1
714 heads[n] = 1
714 # But, obviously its parents aren't.
715 # But, obviously its parents aren't.
715 for p in self.parents(n):
716 for p in self.parents(n):
716 heads.pop(p, None)
717 heads.pop(p, None)
717 heads = [n for n in heads.iterkeys() if heads[n] != 0]
718 heads = [n for n in heads.iterkeys() if heads[n] != 0]
718 roots = roots.keys()
719 roots = roots.keys()
719 assert orderedout
720 assert orderedout
720 assert roots
721 assert roots
721 assert heads
722 assert heads
722 return (orderedout, roots, heads)
723 return (orderedout, roots, heads)
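# Worked sketch (hypothetical linear history 0 -> 1 -> 2 -> 3 with
# nodes n0..n3; not part of the original source):
#
#   rl.nodesbetween(roots=[n1], heads=[n3])
#   # -> ([n1, n2, n3], [n1], [n3])
#
# Element 0 comes out topologically sorted because the main loop scans
# revision numbers upward, and revision order is a topological order.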
723
724
724 def heads(self, start=None, stop=None):
725 def heads(self, start=None, stop=None):
725 """return the list of all nodes that have no children
726 """return the list of all nodes that have no children
726
727
727 if start is specified, only heads that are descendants of
728 if start is specified, only heads that are descendants of
728 start will be returned
729 start will be returned
729 if stop is specified, it will consider all the revs from stop
730 if stop is specified, it will consider all the revs from stop
730 as if they had no children
731 as if they had no children
731 """
732 """
732 if start is None:
733 if start is None:
733 start = nullid
734 start = nullid
734 if stop is None:
735 if stop is None:
735 stop = []
736 stop = []
736 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
737 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
737 startrev = self.rev(start)
738 startrev = self.rev(start)
738 reachable = {startrev: 1}
739 reachable = {startrev: 1}
739 heads = {startrev: 1}
740 heads = {startrev: 1}
740
741
741 parentrevs = self.parentrevs
742 parentrevs = self.parentrevs
742 for r in xrange(startrev + 1, self.count()):
743 for r in xrange(startrev + 1, self.count()):
743 for p in parentrevs(r):
744 for p in parentrevs(r):
744 if p in reachable:
745 if p in reachable:
745 if r not in stoprevs:
746 if r not in stoprevs:
746 reachable[r] = 1
747 reachable[r] = 1
747 heads[r] = 1
748 heads[r] = 1
748 if p in heads and p not in stoprevs:
749 if p in heads and p not in stoprevs:
749 del heads[p]
750 del heads[p]
750
751
751 return [self.node(r) for r in heads]
752 return [self.node(r) for r in heads]
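# Sketch of the single pass above: every revision reachable from start
# is first assumed to be a head and demoted the moment it shows up as
# somebody's parent.  With a hypothetical history 0 -> 1 -> 2 plus a
# branch 1 -> 3, heads(self.node(0)) would return the nodes of
# revisions 2 and 3.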
752
753
753 def children(self, node):
754 def children(self, node):
754 """find the children of a given node"""
755 """find the children of a given node"""
755 c = []
756 c = []
756 p = self.rev(node)
757 p = self.rev(node)
757 for r in range(p + 1, self.count()):
758 for r in range(p + 1, self.count()):
758 for pr in self.parentrevs(r):
759 for pr in self.parentrevs(r):
759 if pr == p:
760 if pr == p:
760 c.append(self.node(r))
761 c.append(self.node(r))
761 return c
762 return c
762
763
763 def _match(self, id):
764 def _match(self, id):
764 if isinstance(id, (long, int)):
765 if isinstance(id, (long, int)):
765 # rev
766 # rev
766 return self.node(id)
767 return self.node(id)
767 if len(id) == 20:
768 if len(id) == 20:
768 # possibly a binary node
769 # possibly a binary node
769 # odds of a binary node being all hex in ASCII are 1 in 10**25
770 # odds of a binary node being all hex in ASCII are 1 in 10**25
770 try:
771 try:
771 node = id
772 node = id
772 r = self.rev(node) # quick search the index
773 r = self.rev(node) # quick search the index
773 return node
774 return node
774 except LookupError:
775 except LookupError:
775 pass # may be partial hex id
776 pass # may be partial hex id
776 try:
777 try:
777 # str(rev)
778 # str(rev)
778 rev = int(id)
779 rev = int(id)
779 if str(rev) != id: raise ValueError
780 if str(rev) != id: raise ValueError
780 if rev < 0: rev = self.count() + rev
781 if rev < 0: rev = self.count() + rev
781 if rev < 0 or rev >= self.count(): raise ValueError
782 if rev < 0 or rev >= self.count(): raise ValueError
782 return self.node(rev)
783 return self.node(rev)
783 except (ValueError, OverflowError):
784 except (ValueError, OverflowError):
784 pass
785 pass
785 if len(id) == 40:
786 if len(id) == 40:
786 try:
787 try:
787 # a full hex nodeid?
788 # a full hex nodeid?
788 node = bin(id)
789 node = bin(id)
789 r = self.rev(node)
790 r = self.rev(node)
790 return node
791 return node
791 except TypeError:
792 except TypeError:
792 pass
793 pass
793
794
794 def _partialmatch(self, id):
795 def _partialmatch(self, id):
795 if len(id) < 40:
796 if len(id) < 40:
796 try:
797 try:
797 # hex(node)[:...]
798 # hex(node)[:...]
798 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
799 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
799 node = None
800 node = None
800 for n in self.nodemap:
801 for n in self.nodemap:
801 if n.startswith(bin_id) and hex(n).startswith(id):
802 if n.startswith(bin_id) and hex(n).startswith(id):
802 if node is not None:
803 if node is not None:
803 raise LookupError(_("Ambiguous identifier"))
804 raise LookupError(_("Ambiguous identifier"))
804 node = n
805 node = n
805 if node is not None:
806 if node is not None:
806 return node
807 return node
807 except TypeError:
808 except TypeError:
808 pass
809 pass
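# Why `len(id) & ~1` above: bin() can only decode an even number of
# hex digits, so a 5-digit prefix like "a5b1c" is narrowed to the
# 4-digit binary prefix for the cheap startswith() test, and the full
# string is then re-checked against hex(n).  Illustrative calls
# (hypothetical nodes, not from the original source):
#
#   rl._partialmatch("a5b1c")   # unique prefix -> the matching node
#   rl._partialmatch("a5")      # two candidates -> LookupError
#   rl._partialmatch("zz")      # not hex -> TypeError -> returns None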
809
810
810 def lookup(self, id):
811 def lookup(self, id):
811 """locate a node based on:
812 """locate a node based on:
812 - revision number or str(revision number)
813 - revision number or str(revision number)
813 - nodeid or subset of hex nodeid
814 - nodeid or subset of hex nodeid
814 """
815 """
815
816
816 n = self._match(id)
817 n = self._match(id)
817 if n is not None:
818 if n is not None:
818 return n
819 return n
819 n = self._partialmatch(id)
820 n = self._partialmatch(id)
820 if n:
821 if n:
821 return n
822 return n
822
823
823 raise LookupError(_("No match found"))
824 raise LookupError(_("No match found"))
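# Usage sketch (hypothetical values, assuming a populated revlog rl):
#
#   rl.lookup(0)              # revision number -> node
#   rl.lookup("0")            # str(revision number) works too
#   rl.lookup("-1")           # negative revs count back from the tip
#   rl.lookup(node)           # a 20-byte binary node, if present
#   rl.lookup(hex(node))      # full 40-digit hex nodeid
#   rl.lookup(hex(node)[:8])  # any unambiguous hex prefix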
824
825
825 def cmp(self, node, text):
826 def cmp(self, node, text):
826 """compare text with a given file revision"""
827 """compare text with a given file revision"""
827 p1, p2 = self.parents(node)
828 p1, p2 = self.parents(node)
828 return hash(text, p1, p2) != node
829 return hash(text, p1, p2) != node
829
830
830 def makenode(self, node, text):
831 def makenode(self, node, text):
831 """calculate a file nodeid for text, descended or possibly
832 """calculate a file nodeid for text, descended or possibly
832 unchanged from node"""
833 unchanged from node"""
833
834
834 if self.cmp(node, text):
835 if self.cmp(node, text):
835 return hash(text, node, nullid)
836 return hash(text, node, nullid)
836 return node
837 return node
837
838
838 def diff(self, a, b):
839 def diff(self, a, b):
839 """return a delta between two revisions"""
840 """return a delta between two revisions"""
840 return mdiff.textdiff(a, b)
841 return mdiff.textdiff(a, b)
841
842
842 def patches(self, t, pl):
843 def patches(self, t, pl):
843 """apply a list of patches to a string"""
844 """apply a list of patches to a string"""
844 return mdiff.patches(t, pl)
845 return mdiff.patches(t, pl)
845
846
846 def chunk(self, rev, df=None, cachelen=4096):
847 def chunk(self, rev, df=None, cachelen=4096):
847 start, length = self.start(rev), self.length(rev)
848 start, length = self.start(rev), self.length(rev)
848 inline = self.inlinedata()
849 inline = self.inlinedata()
849 if inline:
850 if inline:
850 start += (rev + 1) * struct.calcsize(self.indexformat)
851 start += (rev + 1) * struct.calcsize(self.indexformat)
851 end = start + length
852 end = start + length
852 def loadcache(df):
853 def loadcache(df):
853 cache_length = max(cachelen, length) # 4k
854 cache_length = max(cachelen, length) # 4k
854 if not df:
855 if not df:
855 if inline:
856 if inline:
856 df = self.opener(self.indexfile)
857 df = self.opener(self.indexfile)
857 else:
858 else:
858 df = self.opener(self.datafile)
859 df = self.opener(self.datafile)
859 df.seek(start)
860 df.seek(start)
860 self.chunkcache = (start, df.read(cache_length))
861 self.chunkcache = (start, df.read(cache_length))
861
862
862 if not self.chunkcache:
863 if not self.chunkcache:
863 loadcache(df)
864 loadcache(df)
864
865
865 cache_start = self.chunkcache[0]
866 cache_start = self.chunkcache[0]
866 cache_end = cache_start + len(self.chunkcache[1])
867 cache_end = cache_start + len(self.chunkcache[1])
867 if start >= cache_start and end <= cache_end:
868 if start >= cache_start and end <= cache_end:
868 # it is cached
869 # it is cached
869 offset = start - cache_start
870 offset = start - cache_start
870 else:
871 else:
871 loadcache(df)
872 loadcache(df)
872 offset = 0
873 offset = 0
873
874
874 #def checkchunk():
875 #def checkchunk():
875 # df = self.opener(self.datafile)
876 # df = self.opener(self.datafile)
876 # df.seek(start)
877 # df.seek(start)
877 # return df.read(length)
878 # return df.read(length)
878 #assert s == checkchunk()
879 #assert s == checkchunk()
879 return decompress(self.chunkcache[1][offset:offset + length])
880 return decompress(self.chunkcache[1][offset:offset + length])
880
881
881 def delta(self, node):
882 def delta(self, node):
882 """return or calculate a delta between a node and its predecessor"""
883 """return or calculate a delta between a node and its predecessor"""
883 r = self.rev(node)
884 r = self.rev(node)
884 return self.revdiff(r - 1, r)
885 return self.revdiff(r - 1, r)
885
886
886 def revdiff(self, rev1, rev2):
887 def revdiff(self, rev1, rev2):
887 """return or calculate a delta between two revisions"""
888 """return or calculate a delta between two revisions"""
888 b1 = self.base(rev1)
889 b1 = self.base(rev1)
889 b2 = self.base(rev2)
890 b2 = self.base(rev2)
890 if b1 == b2 and rev1 + 1 == rev2:
891 if b1 == b2 and rev1 + 1 == rev2:
891 return self.chunk(rev2)
892 return self.chunk(rev2)
892 else:
893 else:
893 return self.diff(self.revision(self.node(rev1)),
894 return self.diff(self.revision(self.node(rev1)),
894 self.revision(self.node(rev2)))
895 self.revision(self.node(rev2)))
895
896
896 def revision(self, node):
897 def revision(self, node):
897 """return an uncompressed revision of a given"""
898 """return an uncompressed revision of a given"""
898 if node == nullid: return ""
899 if node == nullid: return ""
899 if self.cache and self.cache[0] == node: return self.cache[2]
900 if self.cache and self.cache[0] == node: return self.cache[2]
900
901
901 # look up what we need to read
902 # look up what we need to read
902 text = None
903 text = None
903 rev = self.rev(node)
904 rev = self.rev(node)
904 base = self.base(rev)
905 base = self.base(rev)
905
906
906 if self.inlinedata():
907 if self.inlinedata():
907 # we probably have the whole chunk cached
908 # we probably have the whole chunk cached
908 df = None
909 df = None
909 else:
910 else:
910 df = self.opener(self.datafile)
911 df = self.opener(self.datafile)
911
912
912 # do we have useful data cached?
913 # do we have useful data cached?
913 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
914 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
914 base = self.cache[1]
915 base = self.cache[1]
915 text = self.cache[2]
916 text = self.cache[2]
916 self.loadindex(base, rev + 1)
917 self.loadindex(base, rev + 1)
917 else:
918 else:
918 self.loadindex(base, rev + 1)
919 self.loadindex(base, rev + 1)
919 text = self.chunk(base, df=df)
920 text = self.chunk(base, df=df)
920
921
921 bins = []
922 bins = []
922 for r in xrange(base + 1, rev + 1):
923 for r in xrange(base + 1, rev + 1):
923 bins.append(self.chunk(r, df=df))
924 bins.append(self.chunk(r, df=df))
924
925
925 text = self.patches(text, bins)
926 text = self.patches(text, bins)
926
927
927 p1, p2 = self.parents(node)
928 p1, p2 = self.parents(node)
928 if node != hash(text, p1, p2):
929 if node != hash(text, p1, p2):
929 raise RevlogError(_("integrity check failed on %s:%d")
930 raise RevlogError(_("integrity check failed on %s:%d")
930 % (self.datafile, rev))
931 % (self.datafile, rev))
931
932
932 self.cache = (node, rev, text)
933 self.cache = (node, rev, text)
933 return text
934 return text
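# Reconstruction sketch (illustrative revision numbers): to produce
# rev 7 whose delta chain starts at base 4, the code above effectively
# does
#
#   text = self.chunk(4)   # full snapshot at the chain base
#   text = self.patches(text, [self.chunk(r) for r in (5, 6, 7)])
#
# and then verifies hash(text, p1, p2) against the requested node
# before caching the result.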
934
935
935 def checkinlinesize(self, tr, fp=None):
936 def checkinlinesize(self, tr, fp=None):
936 if not self.inlinedata():
937 if not self.inlinedata():
937 return
938 return
938 if not fp:
939 if not fp:
939 fp = self.opener(self.indexfile, 'r')
940 fp = self.opener(self.indexfile, 'r')
940 fp.seek(0, 2)
941 fp.seek(0, 2)
941 size = fp.tell()
942 size = fp.tell()
942 if size < 131072:
943 if size < 131072:
943 return
944 return
944 trinfo = tr.find(self.indexfile)
945 trinfo = tr.find(self.indexfile)
945 if trinfo is None:
946 if trinfo is None:
946 raise RevlogError(_("%s not found in the transaction")
947 raise RevlogError(_("%s not found in the transaction")
947 % self.indexfile)
948 % self.indexfile)
948
949
949 trindex = trinfo[2]
950 trindex = trinfo[2]
950 dataoff = self.start(trindex)
951 dataoff = self.start(trindex)
951
952
952 tr.add(self.datafile, dataoff)
953 tr.add(self.datafile, dataoff)
953 df = self.opener(self.datafile, 'w')
954 df = self.opener(self.datafile, 'w')
954 calc = struct.calcsize(self.indexformat)
955 calc = struct.calcsize(self.indexformat)
955 for r in xrange(self.count()):
956 for r in xrange(self.count()):
956 start = self.start(r) + (r + 1) * calc
957 start = self.start(r) + (r + 1) * calc
957 length = self.length(r)
958 length = self.length(r)
958 fp.seek(start)
959 fp.seek(start)
959 d = fp.read(length)
960 d = fp.read(length)
960 df.write(d)
961 df.write(d)
961 fp.close()
962 fp.close()
962 df.close()
963 df.close()
963 fp = self.opener(self.indexfile, 'w', atomictemp=True)
964 fp = self.opener(self.indexfile, 'w', atomictemp=True)
964 self.version &= ~(REVLOGNGINLINEDATA)
965 self.version &= ~(REVLOGNGINLINEDATA)
965 if self.count():
966 if self.count():
966 x = self.index[0]
967 x = self.index[0]
967 e = struct.pack(self.indexformat, *x)[4:]
968 e = struct.pack(self.indexformat, *x)[4:]
968 l = struct.pack(versionformat, self.version)
969 l = struct.pack(versionformat, self.version)
969 fp.write(l)
970 fp.write(l)
970 fp.write(e)
971 fp.write(e)
971
972
972 for i in xrange(1, self.count()):
973 for i in xrange(1, self.count()):
973 x = self.index[i]
974 x = self.index[i]
974 e = struct.pack(self.indexformat, *x)
975 e = struct.pack(self.indexformat, *x)
975 fp.write(e)
976 fp.write(e)
976
977
977 # if we don't call rename, the temp file will never replace the
978 # if we don't call rename, the temp file will never replace the
978 # real index
979 # real index
979 fp.rename()
980 fp.rename()
980
981
981 tr.replace(self.indexfile, trindex * calc)
982 tr.replace(self.indexfile, trindex * calc)
982 self.chunkcache = None
983 self.chunkcache = None
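# Summary of the migration above (an annotation of the existing logic,
# not new behaviour): once an inline revlog grows past 131072 bytes
# (128KB), the interleaved data chunks are copied out into the data
# file, the index is rewritten without the REVLOGNGINLINEDATA flag via
# an atomictemp file, and tr.replace() fixes up the transaction offset
# so a rollback still truncates the right place.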
983
984
984 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
985 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
985 """add a revision to the log
986 """add a revision to the log
986
987
987 text - the revision data to add
988 text - the revision data to add
988 transaction - the transaction object used for rollback
989 transaction - the transaction object used for rollback
989 link - the linkrev data to add
990 link - the linkrev data to add
990 p1, p2 - the parent nodeids of the revision
991 p1, p2 - the parent nodeids of the revision
991 d - an optional precomputed delta
992 d - an optional precomputed delta
992 """
993 """
993 if not self.inlinedata():
994 if not self.inlinedata():
994 dfh = self.opener(self.datafile, "a")
995 dfh = self.opener(self.datafile, "a")
995 else:
996 else:
996 dfh = None
997 dfh = None
997 ifh = self.opener(self.indexfile, "a+")
998 ifh = self.opener(self.indexfile, "a+")
998 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
999 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
999
1000
1000 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1001 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1001 if text is None: text = ""
1002 if text is None: text = ""
1002 if p1 is None: p1 = self.tip()
1003 if p1 is None: p1 = self.tip()
1003 if p2 is None: p2 = nullid
1004 if p2 is None: p2 = nullid
1004
1005
1005 node = hash(text, p1, p2)
1006 node = hash(text, p1, p2)
1006
1007
1007 if node in self.nodemap:
1008 if node in self.nodemap:
1008 return node
1009 return node
1009
1010
1010 n = self.count()
1011 n = self.count()
1011 t = n - 1
1012 t = n - 1
1012
1013
1013 if n:
1014 if n:
1014 base = self.base(t)
1015 base = self.base(t)
1015 start = self.start(base)
1016 start = self.start(base)
1016 end = self.end(t)
1017 end = self.end(t)
1017 if not d:
1018 if not d:
1018 prev = self.revision(self.tip())
1019 prev = self.revision(self.tip())
1019 d = self.diff(prev, text)
1020 d = self.diff(prev, text)
1020 data = compress(d)
1021 data = compress(d)
1021 l = len(data[1]) + len(data[0])
1022 l = len(data[1]) + len(data[0])
1022 dist = end - start + l
1023 dist = end - start + l
1023
1024
1024 # full versions are inserted when the needed deltas
1025 # full versions are inserted when the needed deltas
1025 # become comparable to the uncompressed text
1026 # become comparable to the uncompressed text
1026 if not n or dist > len(text) * 2:
1027 if not n or dist > len(text) * 2:
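# (Illustrative arithmetic for the cutoff above: with a 40k snapshot
# at the chain base and 70k of accumulated deltas, dist is roughly
# 110k; for a 50k text the threshold is 100k, so a fresh full version
# is stored and the delta chain is cut.)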
1027 data = compress(text)
1028 data = compress(text)
1028 l = len(data[1]) + len(data[0])
1029 l = len(data[1]) + len(data[0])
1029 base = n
1030 base = n
1030 else:
1031 else:
1031 base = self.base(t)
1032 base = self.base(t)
1032
1033
1033 offset = 0
1034 offset = 0
1034 if t >= 0:
1035 if t >= 0:
1035 offset = self.end(t)
1036 offset = self.end(t)
1036
1037
1037 if self.version == REVLOGV0:
1038 if self.version == REVLOGV0:
1038 e = (offset, l, base, link, p1, p2, node)
1039 e = (offset, l, base, link, p1, p2, node)
1039 else:
1040 else:
1040 e = (self.offset_type(offset, 0), l, len(text),
1041 e = (self.offset_type(offset, 0), l, len(text),
1041 base, link, self.rev(p1), self.rev(p2), node)
1042 base, link, self.rev(p1), self.rev(p2), node)
1042
1043
1043 self.index.append(e)
1044 self.index.append(e)
1044 self.nodemap[node] = n
1045 self.nodemap[node] = n
1045 entry = struct.pack(self.indexformat, *e)
1046 entry = struct.pack(self.indexformat, *e)
1046
1047
1047 if not self.inlinedata():
1048 if not self.inlinedata():
1048 transaction.add(self.datafile, offset)
1049 transaction.add(self.datafile, offset)
1049 transaction.add(self.indexfile, n * len(entry))
1050 transaction.add(self.indexfile, n * len(entry))
1050 if data[0]:
1051 if data[0]:
1051 dfh.write(data[0])
1052 dfh.write(data[0])
1052 dfh.write(data[1])
1053 dfh.write(data[1])
1053 dfh.flush()
1054 dfh.flush()
1054 else:
1055 else:
1055 ifh.seek(0, 2)
1056 ifh.seek(0, 2)
1056 transaction.add(self.indexfile, ifh.tell(), self.count() - 1)
1057 transaction.add(self.indexfile, ifh.tell(), self.count() - 1)
1057
1058
1058 if len(self.index) == 1 and self.version != REVLOGV0:
1059 if len(self.index) == 1 and self.version != REVLOGV0:
1059 l = struct.pack(versionformat, self.version)
1060 l = struct.pack(versionformat, self.version)
1060 ifh.write(l)
1061 ifh.write(l)
1061 entry = entry[4:]
1062 entry = entry[4:]
1062
1063
1063 ifh.write(entry)
1064 ifh.write(entry)
1064
1065
1065 if self.inlinedata():
1066 if self.inlinedata():
1066 ifh.write(data[0])
1067 ifh.write(data[0])
1067 ifh.write(data[1])
1068 ifh.write(data[1])
1068 self.checkinlinesize(transaction, ifh)
1069 self.checkinlinesize(transaction, ifh)
1069
1070
1070 self.cache = (node, n, text)
1071 self.cache = (node, n, text)
1071 return node
1072 return node
1072
1073
1073 def ancestor(self, a, b):
1074 def ancestor(self, a, b):
1074 """calculate the least common ancestor of nodes a and b"""
1075 """calculate the least common ancestor of nodes a and b"""
1075
1076
1076 def parents(rev):
1077 def parents(rev):
1077 return [p for p in self.parentrevs(rev) if p != nullrev]
1078 return [p for p in self.parentrevs(rev) if p != nullrev]
1078
1079
1079 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1080 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1080 if c is None:
1081 if c is None:
1081 return nullid
1082 return nullid
1082
1083
1083 return self.node(c)
1084 return self.node(c)
1084
1085
1085 def group(self, nodelist, lookup, infocollect=None):
1086 def group(self, nodelist, lookup, infocollect=None):
1086 """calculate a delta group
1087 """calculate a delta group
1087
1088
1088 Given a list of changeset revs, return a set of deltas and
1089 Given a list of changeset revs, return a set of deltas and
1089 metadata corresponding to nodes. The first delta is
1090 metadata corresponding to nodes. The first delta is
1090 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1091 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1091 have this parent as it has all history before these
1092 have this parent as it has all history before these
1092 changesets. The parent is parent[0].
1093 changesets. The parent is parent[0].
1093 """
1094 """
1094 revs = [self.rev(n) for n in nodelist]
1095 revs = [self.rev(n) for n in nodelist]
1095
1096
1096 # if we don't have any revisions touched by these changesets, bail
1097 # if we don't have any revisions touched by these changesets, bail
1097 if not revs:
1098 if not revs:
1098 yield changegroup.closechunk()
1099 yield changegroup.closechunk()
1099 return
1100 return
1100
1101
1101 # add the parent of the first rev
1102 # add the parent of the first rev
1102 p = self.parents(self.node(revs[0]))[0]
1103 p = self.parents(self.node(revs[0]))[0]
1103 revs.insert(0, self.rev(p))
1104 revs.insert(0, self.rev(p))
1104
1105
1105 # build deltas
1106 # build deltas
1106 for d in xrange(0, len(revs) - 1):
1107 for d in xrange(0, len(revs) - 1):
1107 a, b = revs[d], revs[d + 1]
1108 a, b = revs[d], revs[d + 1]
1108 nb = self.node(b)
1109 nb = self.node(b)
1109
1110
1110 if infocollect is not None:
1111 if infocollect is not None:
1111 infocollect(nb)
1112 infocollect(nb)
1112
1113
1113 d = self.revdiff(a, b)
1114 d = self.revdiff(a, b)
1114 p = self.parents(nb)
1115 p = self.parents(nb)
1115 meta = nb + p[0] + p[1] + lookup(nb)
1116 meta = nb + p[0] + p[1] + lookup(nb)
1116 yield changegroup.genchunk("%s%s" % (meta, d))
1117 yield changegroup.genchunk("%s%s" % (meta, d))
1117
1118
1118 yield changegroup.closechunk()
1119 yield changegroup.closechunk()
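# Wire-format sketch for each chunk yielded above, matching the
# 80-byte header that addgroup() below unpacks with "20s20s20s20s":
#
#   node (20) | p1 (20) | p2 (20) | linknode (20) | delta ...
#
# lookup(nb) supplies the linknode, the delta is against the previous
# rev in the list (the parent, for the first one), and closechunk()
# emits the end-of-group marker the receiver uses to stop reading.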
1119
1120
1120 def addgroup(self, revs, linkmapper, transaction, unique=0):
1121 def addgroup(self, revs, linkmapper, transaction, unique=0):
1121 """
1122 """
1122 add a delta group
1123 add a delta group
1123
1124
1124 Given a set of deltas, add them to the revision log. The
1125 Given a set of deltas, add them to the revision log. The
1125 first delta is against its parent, which should be in our
1126 first delta is against its parent, which should be in our
1126 log; the rest are against the previous delta.
1127 log; the rest are against the previous delta.
1127 """
1128 """
1128
1129
1129 # track the base of the current delta log
1130 # track the base of the current delta log
1130 r = self.count()
1131 r = self.count()
1131 t = r - 1
1132 t = r - 1
1132 node = None
1133 node = None
1133
1134
1134 base = prev = nullrev
1135 base = prev = nullrev
1135 start = end = textlen = 0
1136 start = end = textlen = 0
1136 if r:
1137 if r:
1137 end = self.end(t)
1138 end = self.end(t)
1138
1139
1139 ifh = self.opener(self.indexfile, "a+")
1140 ifh = self.opener(self.indexfile, "a+")
1140 ifh.seek(0, 2)
1141 ifh.seek(0, 2)
1141 transaction.add(self.indexfile, ifh.tell(), self.count())
1142 transaction.add(self.indexfile, ifh.tell(), self.count())
1142 if self.inlinedata():
1143 if self.inlinedata():
1143 dfh = None
1144 dfh = None
1144 else:
1145 else:
1145 transaction.add(self.datafile, end)
1146 transaction.add(self.datafile, end)
1146 dfh = self.opener(self.datafile, "a")
1147 dfh = self.opener(self.datafile, "a")
1147
1148
1148 # loop through our set of deltas
1149 # loop through our set of deltas
1149 chain = None
1150 chain = None
1150 for chunk in revs:
1151 for chunk in revs:
1151 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1152 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1152 link = linkmapper(cs)
1153 link = linkmapper(cs)
1153 if node in self.nodemap:
1154 if node in self.nodemap:
1154 # this can happen if two branches make the same change
1155 # this can happen if two branches make the same change
1155 # if unique:
1156 # if unique:
1156 # raise RevlogError(_("already have %s") % hex(node[:4]))
1157 # raise RevlogError(_("already have %s") % hex(node[:4]))
1157 chain = node
1158 chain = node
1158 continue
1159 continue
1159 delta = chunk[80:]
1160 delta = chunk[80:]
1160
1161
1161 for p in (p1, p2):
1162 for p in (p1, p2):
1162 if p not in self.nodemap:
1163 if p not in self.nodemap:
1163 raise LookupError(_("unknown parent %s") % short(p))
1164 raise LookupError(_("unknown parent %s") % short(p))
1164
1165
1165 if not chain:
1166 if not chain:
1166 # retrieve the parent revision of the delta chain
1167 # retrieve the parent revision of the delta chain
1167 chain = p1
1168 chain = p1
1168 if chain not in self.nodemap:
1169 if chain not in self.nodemap:
1169 raise LookupError(_("unknown base %s") % short(chain))
1170 raise LookupError(_("unknown base %s") % short(chain))
1170
1171
1171 # full versions are inserted when the needed deltas become
1172 # full versions are inserted when the needed deltas become
1172 # comparable to the uncompressed text or when the previous
1173 # comparable to the uncompressed text or when the previous
1173 # version is not the one we have a delta against. We use
1174 # version is not the one we have a delta against. We use
1174 # the size of the previous full rev as a proxy for the
1175 # the size of the previous full rev as a proxy for the
1175 # current size.
1176 # current size.
1176
1177
1177 if chain == prev:
1178 if chain == prev:
1178 tempd = compress(delta)
1179 tempd = compress(delta)
1179 cdelta = tempd[0] + tempd[1]
1180 cdelta = tempd[0] + tempd[1]
1180 textlen = mdiff.patchedsize(textlen, delta)
1181 textlen = mdiff.patchedsize(textlen, delta)
1181
1182
1182 if chain != prev or (end - start + len(cdelta)) > textlen * 2:
1183 if chain != prev or (end - start + len(cdelta)) > textlen * 2:
1183 # flush our writes here so we can read it in revision
1184 # flush our writes here so we can read it in revision
1184 if dfh:
1185 if dfh:
1185 dfh.flush()
1186 dfh.flush()
1186 ifh.flush()
1187 ifh.flush()
1187 text = self.revision(chain)
1188 text = self.revision(chain)
1188 text = self.patches(text, [delta])
1189 text = self.patches(text, [delta])
1189 chk = self._addrevision(text, transaction, link, p1, p2, None,
1190 chk = self._addrevision(text, transaction, link, p1, p2, None,
1190 ifh, dfh)
1191 ifh, dfh)
1191 if not dfh and not self.inlinedata():
1192 if not dfh and not self.inlinedata():
1192 # addrevision switched from inline to conventional
1193 # addrevision switched from inline to conventional
1193 # reopen the index
1194 # reopen the index
1194 dfh = self.opener(self.datafile, "a")
1195 dfh = self.opener(self.datafile, "a")
1195 ifh = self.opener(self.indexfile, "a")
1196 ifh = self.opener(self.indexfile, "a")
1196 if chk != node:
1197 if chk != node:
1197 raise RevlogError(_("consistency error adding group"))
1198 raise RevlogError(_("consistency error adding group"))
1198 textlen = len(text)
1199 textlen = len(text)
1199 else:
1200 else:
1200 if self.version == REVLOGV0:
1201 if self.version == REVLOGV0:
1201 e = (end, len(cdelta), base, link, p1, p2, node)
1202 e = (end, len(cdelta), base, link, p1, p2, node)
1202 else:
1203 else:
1203 e = (self.offset_type(end, 0), len(cdelta), textlen, base,
1204 e = (self.offset_type(end, 0), len(cdelta), textlen, base,
1204 link, self.rev(p1), self.rev(p2), node)
1205 link, self.rev(p1), self.rev(p2), node)
1205 self.index.append(e)
1206 self.index.append(e)
1206 self.nodemap[node] = r
1207 self.nodemap[node] = r
1207 if self.inlinedata():
1208 if self.inlinedata():
1208 ifh.write(struct.pack(self.indexformat, *e))
1209 ifh.write(struct.pack(self.indexformat, *e))
1209 ifh.write(cdelta)
1210 ifh.write(cdelta)
1210 self.checkinlinesize(transaction, ifh)
1211 self.checkinlinesize(transaction, ifh)
1211 if not self.inlinedata():
1212 if not self.inlinedata():
1212 dfh = self.opener(self.datafile, "a")
1213 dfh = self.opener(self.datafile, "a")
1213 ifh = self.opener(self.indexfile, "a")
1214 ifh = self.opener(self.indexfile, "a")
1214 else:
1215 else:
1215 dfh.write(cdelta)
1216 dfh.write(cdelta)
1216 ifh.write(struct.pack(self.indexformat, *e))
1217 ifh.write(struct.pack(self.indexformat, *e))
1217
1218
1218 t, r, chain, prev = r, r + 1, node, node
1219 t, r, chain, prev = r, r + 1, node, node
1219 base = self.base(t)
1220 base = self.base(t)
1220 start = self.start(base)
1221 start = self.start(base)
1221 end = self.end(t)
1222 end = self.end(t)
1222
1223
1223 return node
1224 return node
1224
1225
1225 def strip(self, rev, minlink):
1226 def strip(self, rev, minlink):
1226 if self.count() == 0 or rev >= self.count():
1227 if self.count() == 0 or rev >= self.count():
1227 return
1228 return
1228
1229
1229 if isinstance(self.index, lazyindex):
1230 if isinstance(self.index, lazyindex):
1230 self.loadindexmap()
1231 self.loadindexmap()
1231
1232
1232 # When stripping away a revision, we need to make sure it
1233 # When stripping away a revision, we need to make sure it
1233 # does not actually belong to an older changeset.
1234 # does not actually belong to an older changeset.
1234 # The minlink parameter defines the oldest revision
1235 # The minlink parameter defines the oldest revision
1235 # we're allowed to strip away.
1236 # we're allowed to strip away.
1236 while minlink > self.index[rev][-4]:
1237 while minlink > self.index[rev][-4]:
1237 rev += 1
1238 rev += 1
1238 if rev >= self.count():
1239 if rev >= self.count():
1239 return
1240 return
1240
1241
1241 # first truncate the files on disk
1242 # first truncate the files on disk
1242 end = self.start(rev)
1243 end = self.start(rev)
1243 if not self.inlinedata():
1244 if not self.inlinedata():
1244 df = self.opener(self.datafile, "a")
1245 df = self.opener(self.datafile, "a")
1245 df.truncate(end)
1246 df.truncate(end)
1246 end = rev * struct.calcsize(self.indexformat)
1247 end = rev * struct.calcsize(self.indexformat)
1247 else:
1248 else:
1248 end += rev * struct.calcsize(self.indexformat)
1249 end += rev * struct.calcsize(self.indexformat)
1249
1250
1250 indexf = self.opener(self.indexfile, "a")
1251 indexf = self.opener(self.indexfile, "a")
1251 indexf.truncate(end)
1252 indexf.truncate(end)
1252
1253
1253 # then reset internal state in memory to forget those revisions
1254 # then reset internal state in memory to forget those revisions
1254 self.cache = None
1255 self.cache = None
1255 self.chunkcache = None
1256 self.chunkcache = None
1256 for x in xrange(rev, self.count()):
1257 for x in xrange(rev, self.count()):
1257 del self.nodemap[self.node(x)]
1258 del self.nodemap[self.node(x)]
1258
1259
1259 del self.index[rev:]
1260 del self.index[rev:]
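# Annotation of the invariant above: a revision may only be stripped
# together with everything that links to a changeset at or after
# minlink, so the loop first advances rev past entries whose linkrev
# (index[rev][-4]) is older than minlink, then truncates the on-disk
# files and the in-memory index and nodemap from that point on.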
1260
1261
1261 def checksize(self):
1262 def checksize(self):
1262 expected = 0
1263 expected = 0
1263 if self.count():
1264 if self.count():
1264 expected = self.end(self.count() - 1)
1265 expected = self.end(self.count() - 1)
1265
1266
1266 try:
1267 try:
1267 f = self.opener(self.datafile)
1268 f = self.opener(self.datafile)
1268 f.seek(0, 2)
1269 f.seek(0, 2)
1269 actual = f.tell()
1270 actual = f.tell()
1270 dd = actual - expected
1271 dd = actual - expected
1271 except IOError, inst:
1272 except IOError, inst:
1272 if inst.errno != errno.ENOENT:
1273 if inst.errno != errno.ENOENT:
1273 raise
1274 raise
1274 dd = 0
1275 dd = 0
1275
1276
1276 try:
1277 try:
1277 f = self.opener(self.indexfile)
1278 f = self.opener(self.indexfile)
1278 f.seek(0, 2)
1279 f.seek(0, 2)
1279 actual = f.tell()
1280 actual = f.tell()
1280 s = struct.calcsize(self.indexformat)
1281 s = struct.calcsize(self.indexformat)
1281 i = actual / s
1282 i = actual / s
1282 di = actual - (i * s)
1283 di = actual - (i * s)
1283 if self.inlinedata():
1284 if self.inlinedata():
1284 databytes = 0
1285 databytes = 0
1285 for r in xrange(self.count()):
1286 for r in xrange(self.count()):
1286 databytes += self.length(r)
1287 databytes += self.length(r)
1287 dd = 0
1288 dd = 0
1288 di = actual - self.count() * s - databytes
1289 di = actual - self.count() * s - databytes
1289 except IOError, inst:
1290 except IOError, inst:
1290 if inst.errno != errno.ENOENT:
1291 if inst.errno != errno.ENOENT:
1291 raise
1292 raise
1292 di = 0
1293 di = 0
1293
1294
1294 return (dd, di)
1295 return (dd, di)
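# Interpretation sketch: (dd, di) are the unexpected trailing bytes in
# the data file and index file respectively; (0, 0) means both files
# end exactly where the index says they should.  A verify pass could
# use it like this (rl and ui are hypothetical, not defined here):
#
#   dd, di = rl.checksize()
#   if dd or di:
#       ui.warn("revlog ends with %d/%d extra data/index bytes\n"
#               % (dd, di))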
1295
1296
1296
1297
@@ -1,107 +1,124 @@
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 hg id
6 hg id
7 echo a > a
7 echo a > a
8 hg add a
8 hg add a
9 hg commit -m "test" -d "1000000 0"
9 hg commit -m "test" -d "1000000 0"
10 hg co
10 hg co
11 hg identify
11 hg identify
12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
13 hg tag -l "This is a local tag with a really long name!"
13 hg tag -l "This is a local tag with a really long name!"
14 hg tags
14 hg tags
15 rm .hg/localtags
15 rm .hg/localtags
16 echo "$T first" > .hgtags
16 echo "$T first" > .hgtags
17 cat .hgtags
17 cat .hgtags
18 hg add .hgtags
18 hg add .hgtags
19 hg commit -m "add tags" -d "1000000 0"
19 hg commit -m "add tags" -d "1000000 0"
20 hg tags
20 hg tags
21 hg identify
21 hg identify
22 echo bb > a
22 echo bb > a
23 hg status
23 hg status
24 hg identify
24 hg identify
25 hg co first
25 hg co first
26 hg id
26 hg id
27 hg -v id
27 hg -v id
28 hg status
28 hg status
29 echo 1 > b
29 echo 1 > b
30 hg add b
30 hg add b
31 hg commit -m "branch" -d "1000000 0"
31 hg commit -m "branch" -d "1000000 0"
32 hg id
32 hg id
33 hg merge 1
33 hg merge 1
34 hg id
34 hg id
35 hg status
35 hg status
36
36
37 hg commit -m "merge" -d "1000000 0"
37 hg commit -m "merge" -d "1000000 0"
38
38
39 # create fake head, make sure tag not visible afterwards
39 # create fake head, make sure tag not visible afterwards
40 cp .hgtags tags
40 cp .hgtags tags
41 hg tag -d "1000000 0" last
41 hg tag -d "1000000 0" last
42 hg rm .hgtags
42 hg rm .hgtags
43 hg commit -m "remove" -d "1000000 0"
43 hg commit -m "remove" -d "1000000 0"
44
44
45 mv tags .hgtags
45 mv tags .hgtags
46 hg add .hgtags
46 hg add .hgtags
47 hg commit -m "readd" -d "1000000 0"
47 hg commit -m "readd" -d "1000000 0"
48
48
49 hg tags
49 hg tags
50
50
51 # invalid tags
51 # invalid tags
52 echo "spam" >> .hgtags
52 echo "spam" >> .hgtags
53 echo >> .hgtags
53 echo >> .hgtags
54 echo "foo bar" >> .hgtags
54 echo "foo bar" >> .hgtags
55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
56 hg commit -m "tags" -d "1000000 0"
56 hg commit -m "tags" -d "1000000 0"
57
57
58 # report tag parse error on other head
58 # report tag parse error on other head
59 hg up 3
59 hg up 3
60 echo 'x y' >> .hgtags
60 echo 'x y' >> .hgtags
61 hg commit -m "head" -d "1000000 0"
61 hg commit -m "head" -d "1000000 0"
62
62
63 hg tags
63 hg tags
64 hg tip
64 hg tip
65
65
66 # test tag precedence rules
66 # test tag precedence rules
67 cd ..
67 cd ..
68 hg init t2
68 hg init t2
69 cd t2
69 cd t2
70 echo foo > foo
70 echo foo > foo
71 hg add foo
71 hg add foo
72 hg ci -m 'add foo' -d '1000000 0' # rev 0
72 hg ci -m 'add foo' -d '1000000 0' # rev 0
73 hg tag -d '1000000 0' bar # rev 1
73 hg tag -d '1000000 0' bar # rev 1
74 echo >> foo
74 echo >> foo
75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
76 hg up -C 1
76 hg up -C 1
77 hg tag -r 1 -d '1000000 0' -f bar # rev 3
77 hg tag -r 1 -d '1000000 0' -f bar # rev 3
78 hg up -C 1
78 hg up -C 1
79 echo >> foo
79 echo >> foo
80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
81 hg tags
81 hg tags
82
82
83 # test tag removal
83 # test tag removal
84 hg tag --remove -d '1000000 0' bar
84 hg tag --remove -d '1000000 0' bar
85 hg tip
85 hg tip
86 hg tags
86 hg tags
87
87
88 # test tag rank
88 # test tag rank
89 cd ..
89 cd ..
90 hg init t3
90 hg init t3
91 cd t3
91 cd t3
92 echo foo > foo
92 echo foo > foo
93 hg add foo
93 hg add foo
94 hg ci -m 'add foo' -d '1000000 0' # rev 0
94 hg ci -m 'add foo' -d '1000000 0' # rev 0
95 hg tag -d '1000000 0' -f bar # rev 1 bar -> 0
95 hg tag -d '1000000 0' -f bar # rev 1 bar -> 0
96 hg tag -d '1000000 0' -f bar # rev 2 bar -> 1
96 hg tag -d '1000000 0' -f bar # rev 2 bar -> 1
97 hg tag -d '1000000 0' -fr 0 bar # rev 3 bar -> 0
97 hg tag -d '1000000 0' -fr 0 bar # rev 3 bar -> 0
98 hg tag -d '1000000 0' -fr 1 bar # rev 4 bar -> 1
98 hg tag -d '1000000 0' -fr 1 bar # rev 4 bar -> 1
99 hg tag -d '1000000 0' -fr 0 bar # rev 5 bar -> 0
99 hg tag -d '1000000 0' -fr 0 bar # rev 5 bar -> 0
100 hg tags
100 hg tags
101 hg co 3
101 hg co 3
102 echo barbar > foo
102 echo barbar > foo
103 hg ci -m 'change foo' -d '1000000 0' # rev 6
103 hg ci -m 'change foo' -d '1000000 0' # rev 6
104 hg tags
104 hg tags
105
105
106 hg tag -d '1000000 0' -r 3 bar # should complain
106 hg tag -d '1000000 0' -r 3 bar # should complain
107 hg tags
\ No newline at end of file
107 hg tags
108
109 # test tag rank with 3 heads
110 cd ..
111 hg init t4
112 cd t4
113 echo foo > foo
114 hg add
115 hg ci -m 'add foo' -d '0 0' # rev 0
116 hg tag -d '0 0' bar # rev 1 bar -> 0
117 hg tag -d '0 0' -f bar # rev 2 bar -> 1
118 hg up -qC 0
119 hg tag -d '0 0' -fr 2 bar # rev 3 bar -> 2
120 hg tags
121 hg up -qC 0
122 hg tag -d '0 0' -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
123 echo % bar should still point to rev 2
124 hg tags
@@ -1,59 +1,65 @@
1 unknown
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 0acdaf898367 tip
4 tip 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 tip 1:8a3ca90d111d
8 first 0:0acdaf898367
9 8a3ca90d111d tip
10 M a
11 8a3ca90d111d+ tip
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 0acdaf898367+ first
14 0acdaf898367+ first
15 M a
16 8216907a933d tip
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 (branch merge, don't forget to commit)
19 8216907a933d+8a3ca90d111d+ tip
20 M .hgtags
21 tip 6:e2174d339386
22 first 0:0acdaf898367
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
27 localtags, line 1: tag 'invalid' refers to unknown node
28 tip 8:4ca6f1b1a68c
29 first 0:0acdaf898367
30 changeset: 8:4ca6f1b1a68c
31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
34 localtags, line 1: tag 'invalid' refers to unknown node
35 tag: tip
36 parent: 3:b2ef3841386b
37 user: test
38 date: Mon Jan 12 13:46:40 1970 +0000
39 summary: head
40
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 tip 4:36195b728445
44 bar 1:b204a97e6e8d
45 changeset: 5:57e1983b4a60
46 tag: tip
47 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: Removed tag bar
50
51 tip 5:57e1983b4a60
52 tip 5:d8bb4d1eff25
53 bar 0:b409d9da318e
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 tip 6:b5ff9d142648
56 bar 0:b409d9da318e
57 abort: a tag named bar already exists (use -f to force)
58 tip 6:b5ff9d142648
59 bar 0:b409d9da318e
60 adding foo
61 tip 3:ca8479b4351c
62 bar 2:72b852876a42
63 % bar should still point to rev 2
64 tip 4:40af5d225513
65 bar 2:72b852876a42
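The new t4 output pins down the conflict rule between heads: the tip-most head normally wins, but a retag loses when the node it proposes already appears in the winning entry's recorded history, i.e. has been superseded there. That is why re-pointing bar at rev 0 on a fresh head leaves "bar 2:72b852876a42" in both listings. A sketch of a comparison consistent with this output (the function and the (node, history) entry shape are illustrative, not Mercurial's internals):

def merge_tag_entry(current, proposal):
    # current: (node, history) accumulated from heads already processed;
    # proposal: entry read from a newer, tip-most head, which normally wins.
    # history lists nodes the tag pointed at earlier in that head's .hgtags.
    cur_node, cur_hist = current
    new_node, new_hist = proposal
    if (cur_node != new_node
            and new_node in cur_hist
            and (cur_node not in new_hist
                 or len(cur_hist) > len(new_hist))):
        # the proposed node was already overridden on the current side,
        # so the proposal does not get to resurrect it
        new_node = cur_node
    # merge histories so later heads are compared against all old nodes
    new_hist.extend(n for n in cur_hist if n not in new_hist)
    return new_node, new_hist

Tracing t4 through this rule: the head tagged at rev 3 beats the rev 2 head (its proposed node is not in the other's history), and the rev 4 retag back to rev 0 is rejected because rev 0 sits in the winner's history, matching both "hg tags" runs above.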