hirlimann committed
Commit ec0b1fc · verified · Parent: 7f3e084

Upload folder using huggingface_hub

Files changed (50)
  1. .gitattributes +0 -3
  2. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)_featurizer +0 -0
  3. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)_indices +1 -0
  4. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer +0 -0
  5. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-9,Token-all)_featurizer +0 -0
  6. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-9,Token-all)_indices +1 -0
  7. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer +0 -0
  8. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-10,Token-all)_featurizer +0 -0
  9. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-10,Token-all)_indices +1 -0
  10. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer +0 -0
  11. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-6,Token-all)_featurizer +0 -0
  12. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-6,Token-all)_indices +1 -0
  13. ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer +0 -0
  14. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-3,Token-all)_featurizer +0 -0
  15. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-3,Token-all)_indices +1 -0
  16. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer +0 -0
  17. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-9,Token-all)_featurizer +0 -0
  18. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-9,Token-all)_indices +1 -0
  19. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer +0 -0
  20. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-10,Token-all)_featurizer +0 -0
  21. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-10,Token-all)_indices +1 -0
  22. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer +0 -0
  23. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-6,Token-all)_featurizer +0 -0
  24. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-6,Token-all)_indices +1 -0
  25. ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer +0 -0
  26. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-3,Token-all)_featurizer +0 -0
  27. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-3,Token-all)_indices +1 -0
  28. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer +0 -0
  29. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-9,Token-all)_featurizer +0 -0
  30. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-9,Token-all)_indices +1 -0
  31. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer +0 -0
  32. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-10,Token-all)_featurizer +0 -0
  33. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-10,Token-all)_indices +1 -0
  34. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer +0 -0
  35. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-6,Token-all)_featurizer +0 -0
  36. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-6,Token-all)_indices +1 -0
  37. ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer +0 -0
  38. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-3,Token-all)_featurizer +0 -0
  39. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-3,Token-all)_indices +1 -0
  40. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer +0 -0
  41. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-9,Token-all)_featurizer +0 -0
  42. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-9,Token-all)_indices +1 -0
  43. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer +0 -0
  44. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-10,Token-all)_featurizer +0 -0
  45. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-10,Token-all)_indices +1 -0
  46. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer +0 -0
  47. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-6,Token-all)_featurizer +0 -0
  48. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-6,Token-all)_indices +1 -0
  49. ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer +0 -0
  50. ioi_linear_params.json +16 -0
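
Every intervention site in this upload arrives as a triple of files: a binary `_featurizer`, a binary `_inverse_featurizer`, and a one-line `_indices` file whose only content is the JSON literal `null`. A minimal sketch of fetching and loading one such triple follows; the repo id is a placeholder, and treating the binaries as torch-serialized objects is an assumption, since neither is stated in the commit:

```python
# Hedged sketch: fetch one featurizer triple from the Hub.
# Assumptions (not stated in this commit): REPO_ID is a placeholder for
# the actual repo, and the binary files were written with torch.save,
# so torch.load can read them back.
import json

import torch
from huggingface_hub import hf_hub_download

REPO_ID = "org/repo"  # hypothetical; replace with the real repo id
SITE = "ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)"

featurizer = torch.load(
    hf_hub_download(REPO_ID, f"{SITE}_featurizer"),
    map_location="cpu",
    weights_only=False,  # the files appear to be pickled objects, not plain state dicts
)
inverse_featurizer = torch.load(
    hf_hub_download(REPO_ID, f"{SITE}_inverse_featurizer"),
    map_location="cpu",
    weights_only=False,
)

# Every *_indices file added in this commit holds the single JSON
# literal `null`, i.e. no feature-index subset is recorded.
with open(hf_hub_download(REPO_ID, f"{SITE}_indices")) as f:
    indices = json.load(f)
assert indices is None
```

If the files live in a dataset repo rather than a model repo, pass `repo_type="dataset"` to `hf_hub_download`.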
.gitattributes CHANGED
@@ -417,6 +417,3 @@ ravel_LlamaForCausalLM_language/ResidualStream(Layer-9,Token-entity_last_token)_
 ravel_LlamaForCausalLM_language/ResidualStream(Layer-9,Token-entity_last_token)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
 ravel_LlamaForCausalLM_language/ResidualStream(Layer-9,Token-last_token)_featurizer filter=lfs diff=lfs merge=lfs -text
 ravel_LlamaForCausalLM_language/ResidualStream(Layer-9,Token-last_token)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
-arithmetic_LlamaForCausalLM_ones_carry/** filter=lfs diff=lfs merge=lfs -text
-ARC_easy_LlamaForCausalLM_answer_pointer/** filter=lfs diff=lfs merge=lfs -text
-ARC_easy_LlamaForCausalLM_answer/** filter=lfs diff=lfs merge=lfs -text
 
ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-9,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-9,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-10,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-10,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer ADDED
Binary file (36.1 kB).
 
ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-6,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-6,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_position/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-3,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-3,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-9,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-9,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-10,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-10,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer ADDED
Binary file (36.1 kB).
 
ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-6,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-6,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_GPT2LMHeadModel_output_token/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-3,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-3,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-9,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-9,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-10,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-10,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer ADDED
Binary file (36.1 kB).
 
ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-6,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-6,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_position/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-3,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-3,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-3,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-9,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-9,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-7,Head-9,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-10,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-10,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-10,Token-all)_inverse_featurizer ADDED
Binary file (36.1 kB).
 
ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-6,Token-all)_featurizer ADDED
Binary file (35.9 kB).

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-6,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null

ioi_QwenForCausalLM_output_token/AttentionHead(Layer-8,Head-6,Token-all)_inverse_featurizer ADDED
Binary file (36 kB).
 
ioi_linear_params.json ADDED
@@ -0,0 +1,16 @@
+{
+  "qwen": {
+    "bias": 5.806240767240524,
+    "position_coeff": -0.021127492189407515,
+    "token_coeff": 0.0019730627536768686,
+    "score": 0.0001041717187265645,
+    "model_name": "Qwen/Qwen2.5-0.5B"
+  },
+  "gpt2": {
+    "bias": 0.04835956729948521,
+    "position_coeff": 2.0046269614249472,
+    "token_coeff": 0.7679728791117681,
+    "score": 0.6773347707368456,
+    "model_name": "openai-community/gpt2"
+  }
+}
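
The new ioi_linear_params.json bundles one set of linear-regression-style coefficients per base model, but the commit does not say how they are consumed. A minimal sketch follows, assuming each entry defines y = bias + position_coeff * x_position + token_coeff * x_token and that "score" is a goodness-of-fit value; the helper name is hypothetical:

```python
# Hedged sketch of consuming ioi_linear_params.json. Assumption (not
# documented in the commit): each entry parameterizes a linear model
#   y = bias + position_coeff * x_position + token_coeff * x_token
# and "score" records the quality of that fit (e.g. an R^2-style metric).
import json

with open("ioi_linear_params.json") as f:
    params = json.load(f)

def linear_score(model_key: str, x_position: float, x_token: float) -> float:
    """Hypothetical helper: apply the stored coefficients for one model."""
    p = params[model_key]
    return p["bias"] + p["position_coeff"] * x_position + p["token_coeff"] * x_token

# The gpt2 fit (score ~0.68) looks meaningful, while the qwen fit
# (~0.0001) suggests the linear model explains almost nothing for
# Qwen2.5-0.5B.
print(linear_score("gpt2", x_position=1.0, x_token=1.0))
```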