Browse Source

mod gitignore

main
cetin 9 months ago
parent
commit
8aec15d8db
  1. 11
      .gitignore
  2. 29
      electron_training/result_1_B/matching.hpp
  3. 64
      electron_training/result_6_B/matching.hpp
  4. 46
      electron_training/result_B/matching.hpp
  5. 46
      electron_training/result_B_new/matching.hpp
  6. 46
      electron_training/result_B_old/matching.hpp
  7. 48
      electron_training/result_B_original_weights_residuals/matching.hpp
  8. 2
      electron_training/result_B_original_weights_residuals/og_weights.txt
  9. 47
      electron_training/result_D/matching.hpp
  10. 47
      electron_training/result_D_old/matching.hpp
  11. 46
      electron_training/result_reg_B/matching.hpp
  12. 48
      neural_net_training/result/matching.hpp
  13. 46
      neural_net_training/result_B/matching.hpp
  14. 48
      neural_net_training/result_B_old/matching.hpp
  15. 49
      neural_net_training/result_D/matching.hpp
  16. 47
      neural_net_training/result_D_old/matching.hpp
  17. 58
      nn_electron_training/result/matching.hpp
  18. 46
      nn_electron_training/result_B_old/matching.hpp
  19. 47
      nn_electron_training/result_B_res/matching.hpp
  20. 47
      nn_electron_training/result_D_res/matching.hpp
  21. 48
      nn_electron_training/result_electron_weights/matching.hpp
  22. 62
      nn_electron_training/result_new_var_dtxy/matching.hpp
  23. 63
      nn_electron_training/result_new_variable_dqop/matching.hpp
  24. 17
      nn_trackinglosses_training/result/matching.hpp
  25. 268
      outputs_nn/output_B.txt
  26. 268
      outputs_nn/output_B_res.txt
  27. 0
      outputs_nn/output_D.txt
  28. 268
      outputs_nn/output_D_res.txt
  29. 268
      outputs_nn/output_both.txt
  30. 268
      outputs_nn/output_e_B.txt
  31. 280
      outputs_nn/output_n_B.txt
  32. 268
      outputs_nn/output_og_weights_B.txt
  33. 268
      outputs_nn/output_og_weights_res_bkg_B.txt
  34. 186
      test/ghost_data_new_vars.ipynb
  35. 408
      test/ghost_data_test.ipynb

11
.gitignore

@ -22,3 +22,14 @@ __pycache__
# downloads and envs # downloads and envs
tuner_env tuner_env
miniconda.sh miniconda.sh
# workproducts
arc1_data
arc2_data
electron_training
neural_net_training
nn_electron_training
nn_trackinglosses_training
outputs_nn
test

29
electron_training/result_1_B/matching.hpp

@ -1,29 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{5.23340640939e-05, 1.25644373838e-06, 4.38690185547e-05, 1.90734863281e-06,
4.71249222755e-07, 1.02445483208e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.9998512268, 0.423314958811,
499.603149414, 499.198486328,
1.36927723885, 0.149392709136}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{0.929669659785881, -9.48043077362455, 10.1715051193127, 2.47667373712576,
-3.58083018257721, 6.45095796912324, 8.45870339869703},
{-0.296042356038334, -9.1896008367615, 5.56711502257143, 17.4486821475108,
-6.40008536792669, -6.6713822283154, 1.07455239812445},
{0.420413986806413, -1.15751488304315, 3.30243747788701, -1.36392382054269,
-0.847138226467055, 4.98479154537921, 4.24441164005755},
{1.5738915069293, -4.98081352303952, 5.8421155864956, -1.57711106103044,
-0.189458896895154, -3.65417561650535, -4.22419444699164},
{-6.66276674820396, 5.45480166931729, -8.03806088012418,
-0.789852234746539, -1.43435711944003, -4.01961155923308,
-14.0834092140066},
{0.817584255737394, 9.67890702465868, -1.76653199291165, -2.6610635109901,
2.51931906192722, -6.76406907184251, 0.968242938156462},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.88483370881801, 0.775741479584514, 0.214825824623319, 1.61128446188167,
1.00658692249476, 0.0826679173714486, -1.12220164589225}}};
const auto fWeightMatrix1to2 = std::array<simd::float_v, 9>{
{-1.73457594569937, -1.67600294506992, 1.88966364345853, 1.18946138791835,
2.47648351789816, -1.24466771533151, -0.315569517202675, 0.530105674163753,
3.05297057699491}};

64
electron_training/result_6_B/matching.hpp

@ -1,64 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{5.23340640939e-05, 1.25644373838e-06, 4.38690185547e-05, 1.90734863281e-06,
4.71249222755e-07, 1.02445483208e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.9998512268, 0.423314958811,
499.603149414, 499.198486328,
1.36927723885, 0.149392709136}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 12>{
{{1.10989365682333, -0.400262341428031, 0.703648655520529,
-10.0488008412327, 2.24766437644792, -1.51561555364132,
-8.19659986380238},
{0.168629698489816, -0.235222573459749, -2.76479490713939,
4.70755796881564, 0.422213317504099, -2.15975111024372,
-0.0413862540873273},
{-1.20008716401126, 5.98181458205593, 1.97530107863487, 0.951399694875291,
3.21037292600378, -1.88398815839093, 6.00348890738369},
{1.59529309197505, 1.03059763992094, -1.28481350389235, 1.77750648317864,
1.66698562433363, 0.560549629043751, -0.646784291824832},
{-10.5582477166915, 1.83421764351223, -4.28308784555713, 2.73941897264262,
-1.09755306824252, -2.76940523423182, -13.1324718956297},
{-1.37726196850241, 1.6684137449588, 0.234563275112263, 0.889405325109031,
1.24137671714337, -0.240977390196439, -2.00650503697469},
{-0.0917280130282914, -6.60741151288151, 4.280141752342, 15.8869539382336,
-4.40078451860264, -11.63552941888, -2.23848664347195},
{1.72810153197739, 1.81133984072885, 1.53310134343984, 1.53430340675608,
-0.880657747996044, -1.01002428097867, 0.327772484279249},
{0.450749853210101, -10.427522498238, 10.1106981167422, 2.50275117049706,
-3.96268925724634, 7.80062171624392, 8.13617432588314},
{-0.899044020226273, 4.04967555584356, -0.184515937391125,
0.605936074234893, 2.11314319461295, 1.08529920345605, 5.198893026323},
{-4.62555398916988, 2.56629651777862, -5.19280819069721, 0.979353155613104,
0.362510005701342, -0.387373325452426, -4.51347844411621},
{0.43181068852013, -1.12870359395317, -5.59123177894442, -2.78683035529746,
-0.119944490657953, -4.22887938179223, -12.1803091805475}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 13>, 6>{
{{-1.67244648790854, 2.58776560386115, 1.05350530586878, 1.12701723441192,
0.309436118156294, -1.41275414644542, -2.14966008423622,
-0.311448006165657, 0.485736777594352, -2.55661662619223,
0.96538530983999, -3.26795296807062, 0.988977174263089},
{-0.381498099253961, -0.549200770730038, -0.893363207717135,
0.119028293459292, 5.13224785809454, 1.77747846865563, -1.7072596641081,
0.0171890434060519, -0.612613204335275, 1.49948177816202,
0.230169849172349, -0.177176079772119, 3.44507835207359},
{-1.20063578327457, 1.63342807940049, -2.53476436290309, -1.5305832886762,
-3.05946450928802, 0.360300407115462, 0.625027143539907,
-1.77680947527138, -0.585041547463601, -2.08759735767147,
0.925138221824412, -1.24854533226616, 2.0502994330023},
{-1.36610982082625, -1.68603095079278, 1.93369535731656, 2.38299921699452,
0.133785811268423, -0.941203171967918, 2.97186174778511, 1.15122509873234,
0.135596009829977, -0.62708569660126, -0.024554433907907,
-0.555962579400608, 0.581541394004209},
{0.349027399089585, 0.0804040832557828, -0.454499280002817,
-1.17318303808809, 0.292596492448844, 0.801032353759436,
0.760037949875418, 0.22815167017283, 0.315794043406641,
-0.969493545848479, -1.03825660899029, 1.94713626859943,
-2.1389717446658},
{-1.88715819596171, 0.277545438410592, -1.68976255449697,
-1.02675310905861, 0.226775035076775, -1.07682401936394,
-0.52218117899507, -1.8253408434363, -1.94344181953331,
-0.444301427484403, -0.343612121595328, -0.177028285618245,
-0.648349320508864}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.844891680754208, 0.967426474103726, 0.960945561425279,
-0.80019723500702, -0.545585546409515, 0.3310030293198,
-2.29115821922715}};

46
electron_training/result_B/matching.hpp

@ -1,46 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{5.23340640939e-05, 1.25644373838e-06, 4.38690185547e-05, 1.90734863281e-06,
4.71249222755e-07, 1.02445483208e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.9998512268, 0.423314958811,
499.603149414, 499.198486328,
1.36927723885, 0.149392709136}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{2.57491955820114, -0.0666351688682796, -3.61648923844569,
-8.75747553035328, 0.639385257730277, 10.5129455348455, 1.22080436670381},
{-0.839608322851951, -11.7978768103865, 7.24641653369801, 6.63011679030303,
-5.38496571510758, 8.30851076817151, 6.69929919816849},
{-0.17630967374162, -3.75940580347415, 2.79889282638498, 16.7594894489781,
-3.10824840396248, -8.80345141844331, 3.9130937162361},
{0.19092161829683, -3.60963385297311, 11.9569759734039, 2.10283509156704,
-2.39101707207304, -1.89478969715624, 7.1165950679585},
{-3.02014034882022, 2.24487587036827, -8.90770349935038, -4.05040202238185,
-1.36813505779681, 9.14630004607903, -3.34618937758505},
{0.459674275912345, 8.12262886393506, 0.45018729587823, -1.11787227534737,
2.8096254085019, -0.481877520480143, 7.78611195142966},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.8337752666274, 0.841018614520629, 0.272259015077869, 1.63031107650108,
0.987469718084883, 0.0999586200250234, -1.13752770875358}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-0.0482883134570299, 0.46403980819019, 2.73665245864103,
0.245936163361116, -0.472281505442891, 0.307317690224363,
1.63438201788813, -1.44341215808597, -0.706584289774802},
{-3.91297125261727, 0.681495111998297, -3.37155822025346,
-0.966831590652637, 2.65933759421044, -0.661174079209186,
-1.61531096417457, 0.0991696473049824, -4.51523108840722},
{0.273186686950073, 1.14087516410812, 0.653137998266985,
-0.158819017566112, 0.692268877136322, -8.04912219449925,
-0.825543426908553, -1.92132463640843, -2.47870678055356},
{0.180394111293318, -0.414717927339332, -1.44129610325848,
-1.86532392228702, -0.806791495297171, -1.73521704274739,
1.61348068527877, -1.66550797875971, -0.927403017991324},
{-0.790929106392951, -0.0886126272927867, 0.035682993929273,
-0.602424006939674, 0.334723143379322, 2.22416454606917,
-0.848898627795279, 0.743857937018801, -0.291005217785123},
{-0.681492967014666, -0.368602644948209, 1.52403393057559,
-1.06212309361209, 0.881062654352226, 0.690165878288055,
-1.52203810494393, 1.63217238068739, 2.76628946224152}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.692725959420674, 1.18375950895893, -1.13672009847538, 0.407788542121486,
-0.606866044733726, 0.927912329413981, -0.887231003739174}};

46
electron_training/result_B_new/matching.hpp

@ -1,46 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{5.23340640939e-05, 1.25644373838e-06, 4.38690185547e-05, 1.90734863281e-06,
4.71249222755e-07, 1.02445483208e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.9998512268, 0.423314958811,
499.603149414, 499.198486328,
1.36927723885, 0.149392709136}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{1.10509804069795, -1.06659111536073, -1.23304417235305, -9.91292225685574,
1.00704133279785, 9.53100159268659, -0.793174916006915},
{-0.776689382841375, -10.4158785961964, 8.69653776953056, 6.84445159227452,
-6.97657257253127, 6.63766574651487, 7.11596889066532},
{-0.381785768656306, -5.11642852466812, 3.59950933567307, 16.792587073888,
-3.83635033235741, -7.72761443893271, 3.58572441569503},
{1.04413334688141, -3.78312149763691, 9.83287128246016, 1.4778662654192,
-2.0766161850877, 1.08288357164774, 8.02887163423859},
{-3.94899781448378, 1.94391204753919, -8.65991195739853, -2.00834934461626,
-3.50457026010403, 4.99589301163709, -6.89137092011374},
{-1.29549202700169, 7.66739081183929, 0.281901363288286, -1.19821907042793,
2.92107740687058, 1.14948481762706, 7.31015879384667},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.76053743491788, 0.909858152169371, 0.323489900540112, 1.61941068281945,
0.92342774005317, 0.0522421825019989, -1.23071245493981}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-0.12420793824361, 0.0833900748795987, 3.10092151009265,
0.320087923870887, -0.582151496774453, 0.029470772407136,
1.63438201788813, -1.43480874379498, 1.55528937765131},
{-2.8947400885087, 0.336449303963403, -2.30774902952597, -2.03456375507124,
2.29485822558142, 0.145499754959071, -1.61531096417457,
0.0991522125616504, -6.41616204842718},
{1.75720706890165, 1.241283421626, 0.607968086927335, -0.816112122281315,
0.0294391273974063, -7.94349478092389, -0.825543426908553,
-1.917979348207, -2.03720925778068},
{0.276286323447054, -0.393087539092168, -1.44329478350452,
-1.86277301712902, -0.807222527397035, -1.7239133524486, 1.61348068527877,
-1.66550797875971, -0.966703432130041},
{-1.20952640410995, -1.34067444254507, 0.079870798547199,
-0.0280804827435552, 0.15103668191983, 2.28974121850533,
-0.848898627795279, 0.747603604163112, 0.000485431747120298},
{-0.806478361586365, -0.902043622848205, 2.75668355569402,
-0.636341321727925, 0.189229471295501, 1.41597159860703,
-1.52203810494393, 1.62460924160209, 1.94946724799691}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-1.05665248808677, 1.16854173340171, -0.924262662063758, 0.441514927697916,
-0.908730180794495, 1.02616776486021, 1.26618724664255}};

46
electron_training/result_B_old/matching.hpp

@ -1,46 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{2.32376150961e-05, 1.07555320028e-06, 1.33514404297e-05, 3.0517578125e-05,
3.99723649025e-06, 4.65661287308e-09}};
const auto fMax = std::array<simd::float_v, 6>{{29.9999885559, 0.509573578835,
498.591552734, 499.918823242,
1.35891008377, 0.149692088366}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{1.09689919364338, -2.36337032185014, -3.02921316084911, -8.60965194111848,
1.07308849187722, 11.2080534568785, -0.962205787111116},
{0.742505004354826, -11.5991419169641, 4.4706991652213, 12.0524034815861,
-7.39781510361567, -0.213355059289303, 2.43301548168847},
{-0.725034235213988, -4.7645569874331, 3.41021029475148, 18.2505819659489,
-2.28931892322383, -6.70009514891697, 7.19788851418639},
{-0.43322070581816, -6.76514244197456, 13.847487618501, 5.02461005105822,
-3.37683447138325, 0.858009838318498, 10.273453699814},
{-5.70448188875026, 5.26491831063117, -11.5555643412233, 3.1883356042284,
-2.133677285889, -2.24006224305986, -8.63987163868301},
{-1.06391634270892, 9.01667090199045, -1.28516566899228, -3.82841187857546,
3.18471029451158, 3.67902076971672, 7.29632098310751},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.68628281779244, 0.945006908224918, 0.536427352104393, 1.40667951887796,
0.832049300778026, -0.1089543073399, -1.43675125780786}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{1.21826975700184, 0.363147471400374, 2.05773388885566, 0.540313557549193,
0.420913357653504, -2.44884689863901, 1.63438201788813, -1.40944934303207,
-2.32871515871553},
{-2.90439321892223, 0.719436509209912, -3.93523150198893,
-1.07342541319164, 2.07689989754924, -2.39788444100381, -1.61531096417457,
0.0991887634515266, -8.04764734753152},
{-2.98923948872248, 2.26253234310036, -0.220642963100105,
-0.279316661053141, 0.0331794243552215, -5.88142829451649,
-0.825543426908553, -1.92002983781799, -8.21361341703474},
{0.662904361912077, -0.885584946591213, -1.45517778095535,
-1.89901295762029, -0.806428733926438, -1.81021900435817,
1.61348068527877, -1.66550797875971, -1.51013848461449},
{-1.37437469030598, -2.21755157129085, 1.33360411388341,
-0.0320979297776227, 0.290980167206705, 2.38901360605064,
-0.848898627795279, 0.766669058008792, 0.257937241570605},
{-1.09504185612819, -0.458703315043996, 1.03883522785983,
-1.05014637612802, 0.806301762243297, 2.21317466894066, -1.52203810494393,
1.5559212254549, 3.4514658408796}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.533136674890316, 1.09880505630834, -0.802064163473207, 1.60699693080913,
-0.951610170601364, 0.802378806704215, -1.20342886768673}};

48
electron_training/result_B_original_weights_residuals/matching.hpp

@ -1,48 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{0.000315562472679, 1.14277577268e-06, 0.000274658203125, 0.000102996826172,
1.25970691442e-05, 4.93600964546e-08}};
const auto fMax =
std::array<simd::float_v, 6>{{29.9998435974, 0.431377887726, 490.802429199,
497.135681152, 1.3582059145, 0.147097736597}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{0.925466511495192, -3.04696651693941, 1.48703910101059, -2.74358886415853,
-1.54108875912546, 5.95181992279351, 1.09520524639045},
{0.225076264550494, -5.30106552386163, 6.29832156309508, 7.0574368868963,
-3.96436697758889, -0.110687606346842, 4.65556823990769},
{0.859449093477803, -1.42364618189426, 1.52973494084549, 1.63204418679045,
0.402800627021359, 2.02973355392681, 1.61963813362595},
{0.797017653476011, 1.80207629996926, 1.98407671947614, -4.84738045778757,
-0.237330392456841, 0.555272132234374, -0.334695720441674},
{-0.0089249002524941, -0.0721593078491391, -4.03962401066098,
-0.741196499782838, -0.520561836165389, -2.43469377130746,
-5.05370729239864},
{-0.324849815552061, 0.571642144152413, -2.26163157259376,
-3.96363877139044, 3.80954499156217, 0.812071601189534,
-0.388923872459538},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.90248604788051, 0.75183501464588, 0.163545686727045, 1.62950884794052,
1.04315466999792, 0.0204618414445436, -1.06300958722802}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-3.75265433194304, -4.03077590921609, 0.560952725012946, 1.59903822739795,
2.24144673906438, 0.377410282165578, 1.63438201788813, -1.47007146768882,
-2.58605490949786},
{-2.60068359938433, -0.0309587124576335, -0.841839408810281,
-0.377592041175285, 0.266402105492061, -1.93675266037507,
-1.61531096417457, 0.0988426038328682, 1.51287715061537},
{-0.257103489627825, 2.38563057831866, -2.06682010253696,
-1.50490219717468, 0.990281758525445, -2.89728072212192,
-0.825543426908553, -1.91692155046286, -0.469424293810405},
{0.680066470317009, -0.353277604226862, -1.4315209802379,
-1.86345277642716, -0.806051898385157, -1.70690619012381,
1.61348068527877, -1.66550797875971, -0.804637203024864},
{0.292121929684041, 1.67922513643505, -0.2207750830665, -1.85432148737195,
-0.761761120528791, 0.148603794427115, -0.848898627795279,
0.776926680814688, 0.515413675465116},
{0.436091761386387, -1.32454758986161, 1.0014013582506, -0.251981066947133,
-0.176482975086784, -0.862489698728272, -1.52203810494393,
1.59929932442845, 0.257302379473767}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-3.33863999066044, 1.86100137130551, -0.825426355935299, 0.455646208323886,
-0.730374866334297, 1.65923402820956, 1.60305190554767}};

2
electron_training/result_B_original_weights_residuals/og_weights.txt

@ -1,2 +0,0 @@
signal: only electrons that are true match but mlp response "no match"
background: all ghost tracks

47
electron_training/result_D/matching.hpp

@ -1,47 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{1.4334048501e-05, 1.21005723486e-06, 0.0001220703125, 4.57763671875e-05,
6.51180744171e-06, 5.58793544769e-09}};
const auto fMax = std::array<simd::float_v, 6>{{29.9999580383, 0.388462841511,
497.0078125, 499.509338379,
1.34583222866, 0.148980647326}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{1.80376907529412, -3.94522006510378, 0.731592325680377, -11.2008566094574,
0.679894249961926, 3.84839657473663, -8.07142111719402},
{-2.07030049796095, 3.26515474867444, 0.320697671285593,
-0.564334829758113, 3.06552235842096, 2.6605948885273, 4.6955026167446},
{0.896838903613344, -1.89786210758941, 6.19791014227951, 13.9534201571522,
-9.67617750026742, -5.13735275462149, 5.25604022070252},
{0.350723542258884, -5.46236108010248, 11.3961275449655, 6.86860356458193,
-4.10979098519269, 7.40355727094559, 17.4195097954786},
{-4.98089321802183, -3.06924425120168, -4.16533013325382,
-1.76525268144874, -0.574266009689755, 1.38792795938214,
-11.9738574538811},
{-1.3792553381734, 6.6360108241851, 1.58470490969407, 1.2116201192747,
3.35950082512036, -2.69400720014141, 5.78773380456927},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.89618444435453, 0.756762886971226, 0.224607628956107, 1.58224418687104,
1.01198188838621, 0.0609287072816972, -1.10149714422957}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-1.95030391768864, -2.57241200745428, 2.22513961422549, 1.09636924630915,
-1.41670442059213, 0.268826912887813, 1.63438201788813, -1.41788409846508,
-0.529591299831198},
{-1.7671292981577, -0.123369349217712, -0.866977288876212,
-0.90631173560288, 2.47901931013162, -2.21695976800688, -1.61531096417457,
0.0991885303988918, 1.28955047096799},
{-0.534089470091784, 1.26461913447419, -0.403013723511741,
-0.758910423086654, -0.92644473334079, -1.52818746990179,
-0.825543426908553, -1.92721239362346, 3.26105888866326},
{0.198023173442802, -0.564145037694825, -1.43289188975828,
-1.86352960767302, -0.808447979295006, -1.71330811211152,
1.61348068527877, -1.66550797875971, -0.910801619527772},
{-1.8227621068219, -0.698025453596766, 0.233245541019916,
-0.387306932182454, -1.05004029412037, -0.333220104163044,
-0.848898627795279, 0.782822716993409, -0.262552968051654},
{-0.314822731076892, -1.50428335429844, 0.179689344301775,
-0.325249075131384, -0.635962383213103, -0.491817587388958,
-1.52203810494393, 1.59283237353393, -0.153430533462156}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.572477318545036, 1.12222369845671, 0.962011482756679, 0.427739156517669,
-1.31864386562843, 1.61500835143017, -1.96827876292426}};

47
electron_training/result_D_old/matching.hpp

@ -1,47 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{1.4334048501e-05, 1.20995343877e-06, 0.000255584716797, 7.62939453125e-05,
1.95447355509e-05, 9.31322574615e-09}};
const auto fMax = std::array<simd::float_v, 6>{{29.9999771118, 0.480875372887,
497.208251953, 499.789672852,
1.33854484558, 0.149920448661}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{-2.53490305244316, -3.87226566727903, -4.46377668521249, 3.76593347190621,
-0.736147925273483, 9.02585614570327, 3.6237871734026},
{0.549076542016505, -6.76487588278314, 12.7146884747338, 6.35190577761167,
-4.4556955709276, 8.26540577622736, 18.015709088484},
{-0.456124601513526, -3.87828385698417, 6.13829079533624, 20.024096872054,
-10.2437102287476, -13.5453994008865, 1.47951238998312},
{0.81464719007702, -5.60573514166659, 7.32263078411251, -5.0446935011349,
-0.701597395356833, 6.4873077480756, 1.05029191775837},
{-5.20529068292815, -1.29341688678248, -12.9211101623102, 4.06192896781978,
-2.24819687530499, -4.47649653096685, -18.7962996196447},
{-1.92319074705205, 9.13989051160526, 1.4372857889395, 3.71255897752862,
2.12080932540223, 0.775519813919651, 13.1780255071529},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{2.48032981707208, 1.99122468030675, 0.147128688791476, 3.20653226030149,
-1.59262641780577, 1.63473498915926, -0.983318163281607}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-1.23665952123739, 1.12167249551526, 0.882383882593374,
-0.923566479348657, 0.0338835472680554, 1.00667491483647,
1.63438201788813, -4.23394622989637, -0.990743251230407},
{-1.17099975681859, -0.0097715608108182, -0.404796049870115,
0.85424890495973, 3.98024762276004, -0.145761966096377, -1.61531096417457,
0.0969352107124514, -5.51833020463238},
{2.4380083430657, 2.00418225228056, -0.792776990439125, -2.80847623446533,
-0.137631196353825, -7.80633161173606, -0.825543426908553,
-2.37988842437401, -6.27310694945946},
{0.225004410426863, -0.39049833080813, -1.43244348842572,
-1.86390252348813, -0.816126277640337, -1.7092593780967, 1.61348068527877,
-1.66550797875971, -0.984918093686436},
{-0.238610540704029, -0.352025987714205, 1.91071878135911,
1.06903719015662, -0.0879085084653824, -5.01732651510019,
-0.848898627795279, 0.301171033553613, -3.31246000228067},
{1.06098527822605, -1.02241211107063, -0.727572693909408,
-0.164960522101848, 0.834295993060446, -0.816864575663688,
-1.52203810494393, 1.63565593112606, 0.829470327557413}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.547016315428893, 0.794153417512957, -0.534810951888594,
0.411807752872488, -1.06003774378693, 0.679060736847631,
0.119300486730084}};

46
electron_training/result_reg_B/matching.hpp

@ -1,46 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{5.23340640939e-05, 1.25644373838e-06, 4.38690185547e-05, 1.90734863281e-06,
4.71249222755e-07, 1.02445483208e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.9998512268, 0.423314958811,
499.603149414, 499.198486328,
1.36927723885, 0.149392709136}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{2.57491955820114, -0.0666351688682796, -3.61648923844569,
-8.75747553035328, 0.639385257730277, 10.5129455348455, 1.22080436670381},
{-0.839608322851951, -11.7978768103865, 7.24641653369801, 6.63011679030303,
-5.38496571510758, 8.30851076817151, 6.69929919816849},
{-0.17630967374162, -3.75940580347415, 2.79889282638498, 16.7594894489781,
-3.10824840396248, -8.80345141844331, 3.9130937162361},
{0.19092161829683, -3.60963385297311, 11.9569759734039, 2.10283509156704,
-2.39101707207304, -1.89478969715624, 7.1165950679585},
{-3.02014034882022, 2.24487587036827, -8.90770349935038, -4.05040202238185,
-1.36813505779681, 9.14630004607903, -3.34618937758505},
{0.459674275912345, 8.12262886393506, 0.45018729587823, -1.11787227534737,
2.8096254085019, -0.481877520480143, 7.78611195142966},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.8337752666274, 0.841018614520629, 0.272259015077869, 1.63031107650108,
0.987469718084883, 0.0999586200250234, -1.13752770875358}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-0.0482883134570299, 0.46403980819019, 2.73665245864103,
0.245936163361116, -0.472281505442891, 0.307317690224363,
1.63438201788813, -1.44341215808597, -0.706584289774802},
{-3.91297125261727, 0.681495111998297, -3.37155822025346,
-0.966831590652637, 2.65933759421044, -0.661174079209186,
-1.61531096417457, 0.0991696473049824, -4.51523108840722},
{0.273186686950073, 1.14087516410812, 0.653137998266985,
-0.158819017566112, 0.692268877136322, -8.04912219449925,
-0.825543426908553, -1.92132463640843, -2.47870678055356},
{0.180394111293318, -0.414717927339332, -1.44129610325848,
-1.86532392228702, -0.806791495297171, -1.73521704274739,
1.61348068527877, -1.66550797875971, -0.927403017991324},
{-0.790929106392951, -0.0886126272927867, 0.035682993929273,
-0.602424006939674, 0.334723143379322, 2.22416454606917,
-0.848898627795279, 0.743857937018801, -0.291005217785123},
{-0.681492967014666, -0.368602644948209, 1.52403393057559,
-1.06212309361209, 0.881062654352226, 0.690165878288055,
-1.52203810494393, 1.63217238068739, 2.76628946224152}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.692725959420674, 1.18375950895893, -1.13672009847538, 0.407788542121486,
-0.606866044733726, 0.927912329413981, -0.887231003739174}};

48
neural_net_training/result/matching.hpp

@ -1,48 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{1.32643926918e-05, 1.20999777664e-06, 3.81469726562e-06, 1.52587890625e-05,
2.20164656639e-06, 1.86264514923e-09}};
const auto fMax = std::array<simd::float_v, 6>{{14.9999952316, 0.436187297106,
249.999572754, 399.485595703,
1.30260443687, 0.148344695568}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{2.32568146034949, -3.97864517484141, -0.976136452226726, 1.84234344676559,
-3.10046463102268, 4.13961872392198, 1.32395215581256},
{-0.246260592363558, -16.6289365646957, 15.8745926520597, 5.54227150397204,
-3.52013322130382, 3.54800430147538, 4.65963029843042},
{-0.0480865527472585, -0.629210074395733, 6.00348546361291,
2.9051880336304, -0.14352194426084, 1.69533803008533, 8.43612131346998},
{0.586453583994425, -2.56124202576808, 2.59227690708752,
0.0874243316906918, -2.97381765628525, 5.49796401976845,
3.23192359468339},
{0.429663439996412, -22.1383805768484, -0.392774946210208,
-3.3393241414433, -0.0183236766918373, 1.7443084621404,
-23.1241106528584},
{1.51561003857451, -0.252437187813493, 3.4382652179148, 1.64873635165153,
1.3257641118939, -1.3769915299618, 6.284788658685},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.26283446391613, 1.060406101318, 0.30016156694275, 0.868137090713936,
0.620452727287864, 0.654572151525178, -1.93868171775984}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-0.756398914721592, 1.43176897679079, -1.9761225512629,
-0.252826703054453, 5.76338466721064, 0.853447490406625, 1.63438201788813,
-1.30124222851611, -1.16516476663684},
{1.33354118308893, 2.2779204457711, -2.4183940976708, -1.41409141050929,
-3.03014280476042, -0.105294409656274, -1.61531096417457,
0.0713464687805576, -4.46730787742624},
{1.69117951310622, 0.478803367417533, -0.0952992998738417,
-1.42291620159966, -5.3475695755735, -0.851706256912453,
-0.825543426908553, -1.84634786630319, 1.10300947885605},
{1.62294844942986, -1.4305887420849, 1.34690035656602, -1.75196364787073,
-1.34911857298729, -1.19784919878849, 1.61348068527877, -1.6413641883722,
-1.80987544922642},
{-0.885340378859963, -1.27010625003553, 1.64729145944323,
-1.93179670311711, -2.00487598846412, 0.858689001379895,
-0.848898627795279, 0.783837335125351, -1.50563595386066},
{-0.643070342091735, -1.362074820856, 3.23003893144526, -1.8069989021131,
-1.52168986931666, -2.92720177768097, -1.52203810494393, 1.54153084775635,
4.02998353429178}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-1.03488783417574, 0.540010884713827, -1.17870273673375, 1.01943381348885,
-0.679190259548567, 1.25798110915057, 2.3197360649145}};

46
neural_net_training/result_B/matching.hpp

@ -1,46 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{1.37092808927e-06, 1.07555365503e-06, 0, 1.90734863281e-06,
1.73929147422e-05, 1.86264514923e-09}};
const auto fMax = std::array<simd::float_v, 6>{{14.9999952316, 0.456682443619,
249.999572754, 399.509643555,
1.33115208149, 0.149789437652}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{-1.3734781925797, 13.4202721220084, -5.84482699847354, 0.208720202271194,
3.52940201568696, -5.35007508017961, 6.10232623582908},
{0.269463828190076, 12.2029002280153, 6.20803317501961, -9.43442815316897,
2.5338939027162, 5.99544654330182, 16.266514230858},
{-0.165852817298963, -12.5570036498389, 19.5108101030614, 10.1445756810778,
-4.70591905221782, -9.82613113151628, 2.66946232799658},
{0.280264112609391, -40.4573608414915, 4.50829859766595, -9.38270110978156,
2.13898954875748, 4.73797410702965, -38.2552994749474},
{-15.3913555770922, 1.18454625888548, 1.03308239102009, 2.80096921737441,
-1.86435943580432, -5.12259817922783, -14.7182721956392},
{-0.473433045504226, 14.9901069695702, -0.236384720797966,
-2.83841297397374, 4.98474416815065, -6.59501221410077, 6.97717117093051},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{0.142197307909266, 4.84602282950846, -9.65725300640334, 5.68314089024306,
0.631054662487241, 0.766483060165155, 2.3260315163825}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{0.647996552227704, -3.612673407752, 0.218049700051821, 4.89119034256858,
-0.00710530398728626, -0.739119819896367, 1.63438201788813,
0.7192739388343, -4.39806909742125},
{-0.719597437431301, -3.27873531826254, -2.03233412174408,
-3.60079441122056, 0.0930923625129556, -2.47473692076248,
-1.61531096417457, -1.73667807655155, 3.65065717704823},
{2.15115292231327, 0.537266754158749, -0.529575619029901,
-0.840914255611436, 1.02786405393109, -2.2383981589872,
-0.825543426908553, -0.685116658617715, -1.95672133400954},
{0.164139216021613, -0.378666175423714, -1.43567813416239,
-1.86509513117207, -0.825083002191541, -1.70460785835385,
1.61348068527877, -1.66550797875971, -0.956253568725315},
{-1.87493924816154, -0.453672605669931, 0.283493943583684,
0.878365550455799, 0.284631862858431, 0.933935190438462,
-0.848898627795279, 0.121615867119966, 2.40557433526087},
{0.853517633026983, -0.322377109742158, 0.30359642229039,
-2.70050427549895, 0.434398564771274, -1.07531792256432,
-1.52203810494393, 0.471135339353818, -7.51274733403613}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.773202850704438, 0.952227138510482, 0.74769506152075, 0.306824902699197,
-0.557424643818581, 1.36609661342348, -1.24818793392955}};

48
neural_net_training/result_B_old/matching.hpp

@ -1,48 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{1.37092808927e-06, 1.07555365503e-06, 0, 1.90734863281e-06,
1.73929147422e-05, 1.86264514923e-09}};
const auto fMax = std::array<simd::float_v, 6>{{14.9999952316, 0.456682443619,
249.999572754, 399.509643555,
1.33115208149, 0.149789437652}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{0.55218535628556, -9.3289553119363, -3.16480805777192, 9.21929582222451,
-5.84675321729571, 4.37995011218691, -2.12651852927708},
{2.19402229437066, -36.4572143799157, 4.72612050852174, 0.871774263011679,
0.308249736812244, 5.59902946146285, -21.3121523564936},
{0.326882064023056, 2.35866196875568, 9.48783066071353, 2.75913715527822,
-3.60778259684168, 2.80447887380193, 12.22677213297},
{0.555959841347612, -11.3379921223552, 24.99514413087, 4.38044026679039,
-4.79766508655656, -5.51874542469878, 8.39926399588362},
{-0.474573814356478, -45.048645069346, -1.91571008337192,
-2.97043145049536, -0.791922976045819, 2.80933052961339,
-45.2686657256446},
{1.02111090620048, 0.942295739720341, 4.23884295504771, 3.69611210680021,
3.06108184531354, -5.59083664638509, 5.48212218750871},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.25219270646431, 0.549228434890616, 0.470255515433846, 0.916142200504342,
1.60846971174291, 0.516066034145183, -1.99907858325808}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-2.16740050633671, 1.64201098704318, -1.81457731661729, 0.276267162453127,
4.41723045721244, 0.116946763347361, 1.63438201788813, -1.34454525041306,
-11.6363132267585},
{-0.975733315897721, -0.74456197080548, 1.37299729852781,
-0.935058973429512, 0.0844226992748141, -0.132452262552727,
-1.61531096417457, -0.186263378023113, 5.02662780750337},
{1.04696354000933, 0.278924511733321, -1.35925413801625, 0.938772342837744,
-0.549530917541879, -0.520171806146222, -0.825543426908553,
-2.06608637235381, -0.791984902945839},
{-1.2045961477844, -0.991003979261367, 1.09783625990238,
-0.421872249827208, -0.889785288418292, 2.04952712400642,
1.61348068527877, -1.7061481912452, -4.6379237728574},
{-1.36108475234833, -0.998277929718627, 1.44485269371602,
-0.712692589749601, 2.24954768341439, 2.14013866962467,
-0.848898627795279, 0.868380765164237, -2.78040856790563},
{-0.388348743847599, -3.23828818784509, -3.09515929145523,
-1.60979064312646, 2.55518501696684, -2.40442392560053, -1.52203810494393,
1.61704406536505, 1.28981466057697}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.662286199846436, 0.602757344813461, -0.498657128878293,
0.682053959836921, -0.846606195204036, 0.885206167679193,
-0.091536072257332}};

49
neural_net_training/result_D/matching.hpp

@ -1,49 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{8.165359759e-06, 1.20664617498e-06, 3.0517578125e-05, 0, 4.7143548727e-06,
5.58793544769e-09}};
const auto fMax =
std::array<simd::float_v, 6>{{14.9999341965, 0.441820472479, 249.991241455,
399.657226562, 1.31253051758, 0.1461160779}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{-2.69517659211572, 11.8302794023495, -4.18951579686807, -3.98494892798567,
2.81897548445767, 0.59383239448013, 8.23409922740496},
{0.211366021230384, -17.963369064596, 15.9757126175754, 7.06797978526591,
-4.70452883659984, -6.9670945574808, -6.09966951812501},
{-0.671572194549583, 11.3044506689324, 0.41567016443692, -1.37717944379749,
4.32454960210643, -2.81417446537734, 9.27800394526066},
{-0.0170007006326477, -29.3978844207289, 1.21375106319138,
-4.08361109078602, 1.26964946956945, 2.36059581879151, -28.6616649803861},
{-11.5040478504233, 0.787126057627091, -1.9688816880041, 3.80563620582515,
-1.24505398457039, -4.63206817893295, -13.6204407803068},
{-0.338909805576579, 5.40829054574145, -5.80255047095045,
-4.01690019633219, 1.01720190260241, -8.00726918670078,
-9.13220942993612},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{-0.0200186919403349, 1.41949954504535, 1.49019129872922,
0.288411192617344, -1.04637027529446, 0.461207091311545,
2.34712624673865}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-0.742932789484951, 1.098742538125, -0.406409364576387, 3.47055509094897,
0.0962780863393642, 1.41748292133237, 1.63438201788813, -1.44301381179313,
-0.572613401802679},
{-0.38589120983735, 1.59861062444015, -0.0248567208616739,
0.671741015980856, -0.708380620370054, -1.03895600322296,
-1.61531096417457, -0.148523097987218, -4.64632456422582},
{0.79166633002489, -1.08475628425482, -4.28859285488566, 1.52323344063281,
0.841577416846386, -2.87987947235168, -0.825543426908553,
-1.68433960913801, 3.44474663480542},
{0.0775004589408732, -0.262461293729405, -1.52083397977799,
-1.8717755745741, -0.836405509817299, -1.7218693116007, 1.61348068527877,
-1.66550797875971, -0.970612266783855},
{-0.173976577204694, 0.622518962366594, 1.06846030554012,
-1.98774771637332, 0.519455930696643, 0.29715629978414,
-0.848898627795279, -0.571811756436865, -0.634485828880002},
{1.01806297385566, -2.23322855713652, -0.6087066354355, -2.48675705217909,
3.17812971554116, 0.101672334443862, -1.52203810494393, 2.31992216900119,
-1.25181073559493}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.916964821952665, 0.719312774569769, -0.639131582384414,
0.543723763328418, -0.519810071051254, 0.818949275577508,
-0.217502220186121}};

47
neural_net_training/result_D_old/matching.hpp

@ -1,47 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{3.08839298668e-06, 1.0285064036e-06, 1.52587890625e-05, 0,
5.87664544582e-07, 1.16415321827e-10}};
const auto fMax =
std::array<simd::float_v, 6>{{14.9999723434, 0.448565632105, 249.991241455,
399.657226562, 1.32571601868, 0.1461160779}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{-13.6018653076529, 11.5780217700141, -7.92762809494091, -2.3767990231665,
2.10509041357149, 8.93423542038951, 0.697736541430846},
{1.39148569147387, -18.5749654585149, 16.332262515645, 8.93683318362009,
-5.31296543840869, -5.3403427435078, -2.19396356951465},
{-1.01323411158617, 13.2753123794943, 0.728991860392637, -2.42297786296918,
5.31377513515812, -3.50060317341991, 10.417424252956},
{-0.248243535822069, 4.62216903283789, 7.02215266119243, 1.16722623835237,
-4.02343144066426, 0.795833957766165, 8.68951250524976},
{-0.238717750484162, 6.4095254209171, -7.18004762765776, -5.26488261250603,
0.399079753011244, -13.2043917021304, -15.6484370000787},
{0.28927080766293, -43.0775712799999, 1.66954473021466, -9.33896425089968,
2.33665742943925, 3.79800824384931, -44.3378970188981},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.40243557561751, 0.527362898119982, 0.45726589950568, 1.14682278333905,
1.07970493015474, -0.120090795589863, -1.93859670804163}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-0.799170659507791, 0.78794128149515, -0.763826599227941,
-2.3771947370175, 1.02090569194105, 2.93661596670106, 1.63438201788813,
-1.4315640726598, -1.65256239855233},
{-0.0840828763430264, 1.63030483445294, 0.480480602063334,
-2.6196066367932, -1.07206902633681, 1.70077768270329, -1.61531096417457,
0.0827459973313509, -6.82577663153282},
{0.549379141222342, -1.30994855822444, -3.47047538273556,
0.416631880451092, -2.01641324755852, 0.534999953845232,
-0.825543426908553, -1.89592023892521, 5.51877157805828},
{0.0804714249535426, -0.5308079142129, -1.48689873935011,
-1.86763554052357, -0.869089360209786, -1.67763600182079,
1.61348068527877, -1.66550797875971, -0.925481963732789},
{-0.686375033428724, 1.09398610198181, 0.699349709460149,
-1.04209787556848, 0.0477294646540392, -0.311194459626976,
-0.848898627795279, 1.21798575421877, -1.20136465619996},
{0.65672978185887, -2.41522086895727, -0.906588505776888, 1.17488116346046,
0.348225140957002, -1.76790548692959, -1.52203810494393, 1.20010038210504,
2.16681827421459}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.711664725241253, 0.506164178116774, -0.741743336419543,
0.501270635463003, -0.672368683770616, 0.747306441658917,
0.789949973283111}};

58
nn_electron_training/result/matching.hpp

@ -1,58 +0,0 @@
const auto fMin = std::array<simd::float_v, 7>{
{2.32376150961e-05, 1.20999845876e-06, 3.0517578125e-05, 0.000152587890625,
5.18634915352e-05, 3.16649675369e-08, 4964.515625}};
const auto fMax = std::array<simd::float_v, 7>{
{29.999835968, 0.448848098516, 490.75402832, 499.918823242, 1.29696559906,
0.148829773068, 5764.58056641}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 8>, 9>{
{{-13.8767665400575, 4.05734115522388, -3.01709661856028, 1.12334316344471,
-1.95431900429486, -4.28496976296461, 2.12003203912787,
-16.3247309911133},
{0.212048009453922, -15.1738107548058, 16.7279720978323, 7.86809247963017,
-2.44754013164889, 7.7765844954342, -7.1858320802125, 14.8502047053221},
{1.03697617644536, -7.74330829725443, 6.56587047894099, 17.8488797860709,
-6.58256061835055, -14.3326703613101, -4.21591741028686,
-3.48521822531376},
{1.07161857075862, -6.02457375820184, -2.95388380942296, -1.32423877366328,
4.40729929976243, 4.47413261680277, -9.1510537721088, -3.00961301024585},
{-0.483652311202822, 1.61937809966064, 3.0445519571216, 0.815891204469984,
0.474869080905695, 3.43775266744451, -1.25098304071557, 7.12769003125851},
{-8.4010714790805, 8.31810836442086, -3.26991947652379, 1.31844760189238,
-0.316007929405036, -0.703746325371237, 4.74898967505285,
-1.11739245753407},
{-0.592761413330552, 4.04188612003611, -0.218806073885883,
3.90563951642846, 7.09174466959683, -6.3569150742699, -5.14953269394216,
2.75424697228316},
{0.547164481580195, 1.70249203967427, 1.94714702524239, -13.7351709164445,
1.80504850488469, -2.90102696607898, 0.572900917600169, -10.365898528612},
{-1.41297642979771, 1.7421562904492, 1.51246974803507, -0.277205719612539,
-0.746303261257708, 1.31841345876455, -0.315569517202675,
-1.43151946831495}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 10>, 7>{
{{-2.70914120357355, 0.519189852188428, 1.64293953499867, -1.42908155115225,
-0.911252443482285, -3.62723599571144, -3.12039388485614,
-2.24012508264097, -1.80018616467714, -0.387269363887802},
{0.825289573993859, 0.977559873140871, -1.19932065232476,
0.448270358180695, -1.01118687034592, -0.12068624133809, 1.92125679147867,
-1.22870635454816, 1.06194042880088, -1.67985680933482},
{0.117628014226149, -0.666150093594241, -1.96462719830508,
-1.34621345717382, 2.69897179096947, 1.45683981784585, -0.280779666268364,
-1.09056907866035, 0.143585634417832, -0.853077107436903},
{0.343557768966074, -1.36884597467765, -0.978489408664556,
1.04108942352196, 2.38422271469634, -1.42280162989848, -1.24692906453324,
1.16005819097626, -1.81861709989607, 0.792826064358476},
{-2.43543923840386, -0.790741678609659, -0.86057585327147,
-0.560696061368329, -0.546486276970939, -1.10828693920102,
-0.390844170382116, -0.191292459405275, 0.655178595334291,
3.62562636803186},
{-1.85600205994161, -0.851713021005162, -2.36960755021907,
-2.65847940214873, 4.19992558926354, 0.482968294979867,
-0.674617611858262, 0.537074281854966, -1.44013551902026,
0.12897906197469},
{3.05467659680961, -0.835919265923888, -1.97139370203255,
-0.833191777667285, 3.1259995582494, 1.3049178372323, -0.601501165563516,
-0.476449568704171, 0.0595564302057028, 1.86826919022162}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 8>{
{-0.742315179835233, -0.384238828861699, -0.639019653069106,
-0.469522590533314, 0.812934812918375, -0.548705434492968,
1.10784727825793, -1.47828921845706}};

46
nn_electron_training/result_B_old/matching.hpp

@ -1,46 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{2.32376150961e-05, 1.51249741975e-06, 3.0517578125e-05, 4.57763671875e-05,
1.30217522383e-05, 9.31322574615e-10}};
const auto fMax =
std::array<simd::float_v, 6>{{29.9999866486, 0.402866601944, 497.675262451,
499.88583374, 1.35172855854, 0.1488314569}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{-0.716890254960393, 5.8069257184991, -1.74563699770656, -1.69375462311209,
0.292600378995007, 4.27627333971203, 5.05829948252536},
{1.39109753193721, -6.17525389654849, 7.57671398067678, -5.43048780303785,
-1.09791116843721, 1.86130825538439, -3.82867359027486},
{-0.463070234910456, -4.56547441068759, 5.40748303002796, 24.3147882327414,
-6.31462696612228, -15.7641466083901, 3.16004633819498},
{0.153443312046544, -13.7240931193717, 12.4658109156892, 3.93975979118258,
-6.11948248810469, 12.0087465863604, 11.8434487900601},
{-5.38333972443605, 7.08960513470396, -14.0225023836695, 1.62191385618879,
-3.70995234249952, -6.21018449120275, -16.3820927289576},
{1.28910616897801, 11.7392825108682, -0.745172957676181, -2.71535399916244,
2.69193347520725, -7.76807154851574, 3.33706974699574},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.69376603852368, 0.713685235953229, 0.537330926797311, 1.24885881426728,
0.849445456302149, 0.0549823762550653, -1.60838065333664}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-3.49743269512971, -1.59190099226759, 2.68952831238107, 1.47409713154181,
-0.358823304868459, 1.51035818148923, 1.63438201788813, -1.37184378061365,
-4.8236951156242},
{-1.62443558899203, 0.637337506470021, -1.81394608796523,
-0.39782822266736, 2.98247880411195, -3.00550692859844, -1.61531096417457,
0.0991975320503116, -7.79260298177481},
{2.63673645224951, 0.769840121669036, -1.81866900675112, -1.22134862739373,
0.671174013434412, -1.47933584039013, -0.825543426908553,
-1.92253219419135, 3.8017813083906},
{0.205195965291138, -0.35698019904733, -1.43178372298118,
-1.86979559465315, -0.819043768918633, -1.72129504552091,
1.61348068527877, -1.66550797875971, -0.957274797031432},
{3.39235161127949, 0.557496083138389, 0.358810791879255, -1.30084105984251,
-0.542916984939091, -0.0267147558240502, -0.848898627795279,
0.771556793635358, 0.0697782536980876},
{0.481340186388348, 0.112198736662793, 2.17905577117167,
-0.602783430688711, -0.0915323075405589, 0.497824854127751,
-1.52203810494393, 1.50364257368639, -0.374485200843083}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.768478620967589, 0.945551538481868, 0.96174226855089, 0.370062157422418,
-0.78327662856066, 0.822576347537717, -0.718860728264376}};

47
nn_electron_training/result_B_res/matching.hpp

@ -1,47 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{0.354097932577, 5.52064511794e-06, 0.000244140625, 8.39233398438e-05,
6.46021217108e-05, 3.98140400648e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.9984798431, 0.343307316303,
487.684082031, 497.415130615,
1.28809189796, 0.148829773068}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{0.528613355828958, -3.98084730501778, -0.592082531501982,
0.138947239841158, -0.778623431382993, -0.581951852087617,
-3.3077751082926},
{0.626191010935061, -6.59632782328807, 9.64286730275841, 9.55716888102903,
-4.21769290858214, -0.877735461418827, 7.66427785912706},
{0.589042763591211, 0.342730710819044, 2.15591537442552, 3.00613486546159,
0.031406906405544, 0.245821626313224, 4.14102878259858},
{0.331983030850774, 0.936730026632873, 5.0246621889186, -8.55182143000926,
-1.36911477615904, -0.62033806094376, -4.12767358459756},
{-1.83087334545531, 1.70659514344126, 1.64680904436349, -3.69383485282499,
-1.60992615163927, -1.33158200933679, -3.68738551321132},
{0.497605072926462, 3.10686068573917, -3.38889852931357, -2.83744183592321,
5.8582848269084, -5.8114650940735, -2.19632367553395},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{2.34124335963039, 0.212194857163335, 0.442598967492635, 1.99142561696414,
0.932043520652152, 0.0950084159057334, -0.343964005014347}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-3.9980933021236, -0.654530522369937, 1.38643691032487, 0.846962243830957,
0.106764765445591, 0.432714049442539, 1.63438201788813, -1.09500118769163,
-0.477330937420509},
{-1.00177046130397, 0.910392283082755, -1.10524270003512,
-0.863367119958066, 0.356000819965252, -1.36464636332376,
-1.61531096417457, -1.07499530514837, 2.02772049025211},
{1.06312654536343, 1.19247984844137, -2.56993344812772, -1.59660765668362,
-1.43473393145022, -2.45597801241373, -0.825543426908553,
-1.66068434492917, 1.54276462560785},
{1.81681515757912, -1.04949680940877, -1.47464408054066, -2.35655553716087,
-0.81674566968838, -2.03350840389647, 1.61348068527877, -1.66550797875971,
-2.15831244577917},
{-1.03932528137019, 1.40966162144001, -1.28446720148786, -1.3440214301115,
-0.764149070532308, -0.346882028973845, -0.848898627795279,
2.00051119462677, 3.35327375607444},
{-1.86664223320468, -2.77494106516727, 0.280364440162091,
-0.51153329496928, 0.099515543403597, -0.231471190430381,
-1.52203810494393, 1.14272217943492, 0.830204232719646}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.707654910623957, 0.947610371696967, -0.734533082005471,
2.92853232573231, -0.764897377620809, 2.76504552610281, 2.01235259703278}};

47
nn_electron_training/result_D_res/matching.hpp

@ -1,47 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{0.257591664791, 1.18096104416e-05, 0.000593185424805, 0.00165557861328,
0.00012809690088, 4.9639493227e-07}};
const auto fMax = std::array<simd::float_v, 6>{{29.9983310699, 0.346089184284,
494.445037842, 497.105712891,
1.28034591675, 0.146788269281}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{0.238406879601667, -5.59592601269328, -1.48529518782053,
-1.21815009023291, 1.34269102160607, 1.34969291565497, -4.51875687730105},
{0.396886922398879, -1.55290356333354, 9.68785078213303, 8.92661791228501,
-4.14921556686506, -4.79373464075343, 8.51558304693096},
{-0.605978331887513, -2.01049013335995, 2.42576702923552, 1.52363979902223,
-0.98764665307072, 5.47124537232274, 6.44617285846946},
{0.194697743583909, 1.28944295625644, 7.01265960466827, -8.8098678043251,
-1.29787641608371, -1.01125992648077, -2.62580313202802},
{-0.149097384185005, 0.601644139549549, -3.20384472073729,
-1.11764357962076, 0.661266078420317, -2.99007258105897,
-4.75089443675904},
{-0.0637125691675382, -0.031901246578545, -5.86825160360429,
-6.08669255423129, 6.57894839440667, 1.56562582414305, -2.45567329718821},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{2.16691834156097, 1.28877310398459, -0.0182036429219536, 1.64574682748412,
-1.776462177169, 1.02789865613476, -1.86072790490082}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-4.86220133984369, 0.118835729154668, -0.219969977992415,
0.391324848601455, -1.52700917088122, 1.34069581551041, 1.63438201788813,
-1.46686286855675, 0.828587551619351},
{-2.34704356917816, 1.32098559104381, -0.35321222806336, -2.37018474851075,
-0.428177327276122, -0.598193543229222, -1.61531096417457,
0.788200423431897, 1.42375444061969},
{-0.520599794082693, 1.88897717167843, -0.983200551417999,
-2.10145861332195, 2.58359759649054, -1.9520611743449, -0.825543426908553,
-2.21273436389439, 1.68368588984848},
{0.687372876118682, -0.350871511760717, -1.43005506081713,
-1.86332872620019, -0.805133918174304, -1.70605683547268,
1.61348068527877, -1.66550797875971, -0.80539832878319},
{0.641334100110318, 0.829686404507413, 1.12377545166463, -1.2786548533532,
-2.2652307380297, -0.577326144935801, -0.848898627795279,
-0.112416063323718, 3.09322414387249},
{-2.10459256659739, -2.04968111694632, 0.989486352894292,
-1.53078668929007, -0.90726448865931, 0.837532331802425,
-1.52203810494393, 2.96223264118436, -2.25826102849139}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.632200441072234, 1.49561211302111, -1.13710464066982, 0.45277221100554,
-0.690200710879259, 0.878498633554998, 2.07286062799155}};

48
nn_electron_training/result_electron_weights/matching.hpp

@ -1,48 +0,0 @@
const auto fMin = std::array<simd::float_v, 6>{
{2.32376150961e-05, 1.20999845876e-06, 3.0517578125e-05, 0.000152587890625,
5.18634915352e-05, 3.16649675369e-08}};
const auto fMax = std::array<simd::float_v, 6>{{29.999835968, 0.448848098516,
490.75402832, 499.918823242,
1.29696559906, 0.148829773068}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
{{0.972643778287334, 0.945437530240695, -1.40069143935294,
-15.6034120045671, 1.14493675557278, 6.76331107008671, -6.58864627844693},
{1.99177578845469, -13.3678019612632, 8.38118795560118, 1.73988710441318,
-4.61454323644065, 5.29554800958296, 1.796743670204},
{0.154471209290507, -6.25196675947653, 5.03239643950246, 17.3659761341648,
-6.54695139344376, -13.0321058473978, -2.79459536100855},
{-1.91255962568079, -8.6500289238652, 11.3312847667967, 13.5402314908838,
-2.61341614761575, 6.63476937311634, 18.5047027165893},
{-13.4902851128642, 5.03927112314943, -7.35289370328568,
0.0572131890099181, -1.6142848069816, -3.07255458814266,
-18.9635216594601},
{1.88222476973218, 6.53087839421258, 2.08080853139342, 0.816872513930955,
1.76981234909237, -8.6501994076645, 3.81699174241397},
{-0.79066972900182, -0.617757099680603, 0.740878002718091,
0.681870030239224, -1.20759406685829, 0.769290467724204,
-1.8437808630988},
{1.96787188749046, 0.680940366397391, 0.050263650384077, 1.68306844400001,
1.12938262301514, 0.122157098634831, -0.887283402159991}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 9>, 6>{
{{-2.73702380879827, 1.22468365009789, 2.40149928694528, 0.276654711632341,
-0.947460759127638, -0.94795299724562, 1.63438201788813,
-1.41515589667229, -0.708508928627869},
{-0.408168817589508, -0.542699435360695, -0.336829708223667,
-0.507220427829013, 0.533181686353704, -0.0512849135791123,
-1.61531096417457, 0.0991539876010671, 4.00684418941464},
{0.401110123287066, -0.82501422982477, -0.82214087163611,
-2.13310745114762, 0.656608219190029, -1.54611499475089,
-0.825543426908553, -1.92246825444023, -2.49920928064247},
{0.743417630960188, -2.54297207137451, 0.868639896626588, 1.21759484724959,
-0.432278512319556, -0.682439011110067, 1.61348068527877,
-1.70813842427554, 0.191141321065651},
{0.601790057732671, -2.70865568575877, -0.949516903771233,
1.41807664967738, 0.0135866328882364, 1.63463920593405,
-0.848898627795279, 0.794266404867267, -4.68030461730642},
{-0.894524549453373, -0.413420422791491, -1.27841462173856,
-0.921761527738667, 1.7613032977725, -1.20901458126865, -1.52203810494393,
1.63899587513312, 3.18360564985773}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 7>{
{-0.468166794846483, 0.905418443044577, 0.345720533590786,
0.626519340549303, -0.564753919345451, 0.871170117133406,
-2.29725166588317}};

62
nn_electron_training/result_new_var_dtxy/matching.hpp

@ -1,62 +0,0 @@
const auto fMin = std::array<simd::float_v, 7>{
{2.32376150961e-05, 1.20999845876e-06, 3.0517578125e-05, 0.000152587890625,
5.18634915352e-05, 3.16649675369e-08, 1.63267832249e-05}};
const auto fMax = std::array<simd::float_v, 7>{
{29.999835968, 0.448848098516, 490.75402832, 499.918823242, 1.29696559906,
0.148829773068, 1.406919837}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 8>, 9>{
{{-15.5425486721894, 4.46064219760936, -2.34623364306547, 0.673061906567754,
-0.869156572627564, -3.91456514808376, -0.568696256770434,
-16.6632172224501},
{0.447767212299748, -12.0732988541946, 13.5397418382974, 6.88739679435815,
-6.24126921111681, 10.2657097797903, 2.43233624582838, 16.3044055554715},
{0.711332752822416, -7.17479141259481, 6.60735241080743, 17.1002661287198,
-5.66447497808782, -13.5847364290022, -3.2812531600052,
-4.16110866444881},
{0.632252449853337, -0.994201889160893, 0.163028638247136,
0.771845371822938, 1.96713990468425, 3.63340983309008, -1.20631209983256,
-0.448420201049805},
{-0.841164977118048, 9.93038462960693, 2.29748287289709,
-0.0626255430240932, 3.26040532046237, -3.3032557034584,
0.549324748173291, 8.63089145494412},
{-4.64294924610689, -1.03961735354666, -5.94838304383518,
-5.14494916413428, 0.865768755325211, 3.17305862226336, -0.17689672644592,
-11.1702998443119},
{-0.75257412651179, 7.45653016330318, 1.53531423087191, -0.944661904110734,
2.27175825244693, 0.625586633690943, 0.556680865915938, 8.70515377733531},
{1.48517605340595, -1.10139488332919, -1.20437312666678, -15.7567359489487,
0.564551471160599, 0.343355103916556, 0.956188296533458,
-14.4810699542064},
{-1.41297642979771, 1.7421562904492, 1.51246974803507, -0.277205719612539,
-0.746303261257708, 1.31841345876455, -0.315569517202675,
-1.43151946831495}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 10>, 7>{
{{0.249095596049212, 0.43896816611743, 2.51443611518656, -1.99550475508056,
-3.01891555380374, -1.5384309247739, -1.10809432820241, -2.23884147411375,
-1.80018616467714, 0.0926501061367807},
{-0.79810107527313, -0.128565504120936, -1.47898746860618,
-1.98749268865462, -4.1729473774923, -0.319376625137038, 2.68241976233123,
-1.2438721745196, 1.06194042880088, -1.11115934197209},
{-0.541616972751047, -0.883639706603654, -1.21647636736428,
2.00429851976991, -0.333604676335978, -1.30666235698471,
0.300409853048531, 1.71280717271126, 0.143585634417832,
0.862440249952535},
{-0.0738827412712401, 0.710660017309775, -1.81469923323104,
-2.0032894120881, 0.0757757984176355, 0.946471866500602,
-0.862679340246423, -0.336329345694109, -1.81861709989607,
-1.65647777258377},
{-2.46837296738587, -0.892461394707053, -0.164670653708065,
-1.40986988591441, -1.29634197190675, -0.103818171050218,
1.62473520412615, -0.10368342877725, 0.655178595334291, 3.10987357888943},
{-3.51942943078094, 0.05403637176598, -0.112974678381018,
-0.992599640919349, 2.32462754890465, 0.0152632384089371,
-1.55107042088954, -2.78524739346744, -1.44013551902026,
-0.069348300182213},
{3.50273770909445, -0.563785026359985, -0.682273837786807,
0.00116206143253937, 0.0443816144597161, 0.571844608360393,
-1.17322063876001, -1.09420727621842, 0.0595564302057028,
0.887055205865514}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 8>{
{-0.527095695381938, 0.873978759522188, -0.505869602713493,
-0.458736757125275, 1.00063852384923, -0.651233083081496, 1.09109419846381,
-1.55585886153223}};

63
nn_electron_training/result_new_variable_dqop/matching.hpp

@ -1,63 +0,0 @@
const auto fMin = std::array<simd::float_v, 7>{
{2.32376150961e-05, 1.20999845876e-06, 3.0517578125e-05, 0.000152587890625,
5.18634915352e-05, 3.16649675369e-08, 2.91038304567e-11}};
const auto fMax = std::array<simd::float_v, 7>{
{29.999835968, 0.448848098516, 490.75402832, 499.918823242, 1.29696559906,
0.148829773068, 0.000133186404128}};
const auto fWeightMatrix0to1 = std::array<std::array<simd::float_v, 8>, 9>{
{{-1.92788180969447, 4.16064785412784, -2.11335551271703, 7.9294095607534,
2.18170560740568, -8.44761548627774, -21.5047552584798,
-16.0650884865238},
{-1.61376550856811, -18.3767723062232, 6.29188806075221, 17.1629698975724,
-4.06178649035417, 8.91994724771869, 0.945309347327087, 10.2364214261801},
{0.260725207756956, -7.39063316113963, 5.85798680146154, 20.895198655668,
-5.37769824548582, -14.2293948664243, 0.995342070597369,
1.20800372506884},
{0.953241806012774, 2.47323765132763, 2.08443843097691, -1.43196935568204,
4.74700613459522, 0.189179081361804, -16.9045615453658,
-6.43026395704888},
{-1.92981663032188, -0.37230565268653, 0.814369803792726, 1.73699189907859,
1.11733301402944, 1.46887116928329, 1.49186452419427, -1.29918641902703},
{-16.8338849958765, 6.56643273179019, -3.48428147705122, 0.326903745037315,
-2.06105265356339, -4.41540617406857, 5.02090403276349,
-13.5579467656888},
{-2.06856046873756, 1.37857017711159, -10.4255727807086, 5.40802232507597,
6.86445294409404, 5.35440411482745, -6.85993978444102,
-0.729076014469736},
{-0.526653874830334, 8.98715315712336, -2.34742788084526,
-1.27417058696474, 5.55759129208842, -3.2700796957674, -0.831113531084397,
2.18499951551135},
{-1.41297642979771, 1.7421562904492, 1.51246974803507, -0.277205719612539,
-0.746303261257708, 1.31841345876455, -0.315569517202675,
-1.43151946831495}}};
const auto fWeightMatrix1to2 = std::array<std::array<simd::float_v, 10>, 7>{
{{0.0324469931195793, -0.288230539372084, 1.64983047434275,
-1.24756371282518, -1.94639586807131, -0.310928305245747,
-4.99162520915551, 0.264942892832968, -1.80018616467714,
2.77914512003005},
{-0.0148602437129058, -1.2132748075938, -0.218359722842231,
-0.633592266259126, -1.66464499515867, -2.55247320011507,
0.942074824320476, -1.41987137293765, 1.06194042880088, 3.89059634854256},
{-0.0974156676281787, 1.4515472939941, 1.12169407748122, 0.1569833587188,
0.715433387778868, -2.40068948213013, -1.20271162851859, 1.58722622760245,
0.143585634417832, -0.958611632301647},
{-0.535241107505903, -0.222101479961216, -1.72874348280829,
-1.09357655226657, 1.67832177468419, -1.85229898078416,
-0.879756942942339, 0.0297380421842839, -1.81861709989607,
-0.271711324852575},
{-2.12445796868783, -0.913233265968283, -0.338898758417067,
-1.65257155394075, -1.15348755568266, -0.571688294860023,
-0.590397833605982, -0.152323738308279, 0.655178595334291,
-6.84207556062884e-06},
{-1.95868900053493, -0.605205894790946, -1.36009261632635,
-2.34452772551367, 1.60461574133745, -0.00209217938454121,
0.145219515490194, -3.24026630749251, -1.44013551902026,
10.2107763198695},
{0.384756246095988, -0.392456215033468, -2.59979095776574,
-1.14968086393069, -0.936541749845882, 4.08852696879947,
-0.0319867516820682, -1.98678786024887, 0.0595564302057028,
3.2850148235822}}};
const auto fWeightMatrix2to3 = std::array<simd::float_v, 8>{
{-0.975457561894625, 0.722739660815715, -0.35623550622024, 1.13391106903613,
0.663374242757088, -0.893283186205502, 0.795604576331046,
-1.33372154704332}};

17
nn_trackinglosses_training/result/matching.hpp

@ -1,17 +0,0 @@
const auto ResfMin = std::array<simd::float_v, 6>{
{1.20620707094e-05, 2.0063980628e-06, 8.45295653562e-05, 0.000119162104966,
4.75468114018e-05, 4.38088898491e-09}};
const auto ResfMax =
std::array<simd::float_v, 6>{{29.999212265, 0.29415422678, 486.515930176,
499.948669434, 1.293815732, 0.145083397627}};
// WARNING(review): training of this network diverged -- every weight in the
// TMVA export came out NaN, and the generator wrote bare `nan` / `-nan`
// tokens, which are not valid C++ and do not compile. They are replaced
// below by an explicit quiet-NaN value so the header is at least
// well-formed; the matrices remain unusable and the network MUST be
// retrained before this header is consumed by the matching code.
// 0.0f / 0.0f produces a quiet NaN under IEEE-754 arithmetic without
// requiring <cmath> or <limits>.
inline const float kResfNaN = 0.0f / 0.0f;
// Weights, input layer -> hidden layer: 8 neurons x 7 entries each
// (presumably 6 input variables plus one bias column -- confirm against
// the TMVA export convention).
const auto ResfWeightMatrix0to1 = std::array<std::array<simd::float_v, 7>, 8>{
    {{kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN},
     {kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN, kResfNaN}}};
// Weights, hidden layer -> output neuron: 9 entries (presumably 8 hidden
// neurons plus one bias term). The sign of NaN is meaningless; `-kResfNaN`
// only mirrors the `-nan` tokens of the original export.
const auto ResfWeightMatrix1to2 = std::array<simd::float_v, 9>{
    {-kResfNaN, -kResfNaN, -kResfNaN, -kResfNaN, -kResfNaN, -kResfNaN,
     -kResfNaN, -kResfNaN, -kResfNaN}};

268
outputs_nn/output_B.txt

@@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 187767 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 14040318 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=100000.0:nTrain_Background=200000.0:nTest_Signal=10000.0:nTest_Background=20000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "100000" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "10000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "200000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "20000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 100000
: Signal -- testing events : 10000
: Signal -- training and testing events: 110000
: Background -- training events : 200000
: Background -- testing events : 20000
: Background -- training and testing events: 220000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.094 +0.508 +0.558 +0.393 +0.145
: teta2: -0.094 +1.000 -0.010 +0.345 -0.010 +0.388
: distX: +0.508 -0.010 +1.000 +0.202 +0.501 +0.230
: distY: +0.558 +0.345 +0.202 +1.000 +0.507 +0.472
: dSlope: +0.393 -0.010 +0.501 +0.507 +1.000 +0.497
: dSlopeY: +0.145 +0.388 +0.230 +0.472 +0.497 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 +0.008 +0.363 +0.312 -0.001 +0.102
: teta2: +0.008 +1.000 +0.217 +0.626 +0.297 +0.493
: distX: +0.363 +0.217 +1.000 +0.062 +0.631 +0.203
: distY: +0.312 +0.626 +0.062 +1.000 +0.250 +0.543
: dSlope: -0.001 +0.297 +0.631 +0.250 +1.000 +0.361
: dSlopeY: +0.102 +0.493 +0.203 +0.543 +0.361 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 8.4293 9.2426 [ 2.3238e-05 30.000 ]
: teta2: 0.0057581 0.014094 [ 1.5125e-06 0.40287 ]
: distX: 40.107 55.141 [ 3.0518e-05 497.68 ]
: distY: 26.294 37.024 [ 4.5776e-05 499.89 ]
: dSlope: 0.33133 0.23520 [ 1.3022e-05 1.3517 ]
: dSlopeY: 0.0054522 0.0092106 [ 9.3132e-10 0.14883 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 5.690e-01
: 2 : distX : 3.736e-01
: 3 : distY : 2.091e-01
: 4 : dSlopeY : 8.232e-02
: 5 : dSlope : 8.601e-03
: 6 : teta2 : 3.474e-03
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.43805 0.61618 [ -1.0000 1.0000 ]
: teta2: -0.97142 0.069969 [ -1.0000 1.0000 ]
: distX: -0.83882 0.22159 [ -1.0000 1.0000 ]
: distY: -0.89480 0.14813 [ -1.0000 1.0000 ]
: dSlope: -0.50978 0.34801 [ -1.0000 1.0000 ]
: dSlopeY: -0.92673 0.12377 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 300000 events: 853 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (300000 events)
: Elapsed time for evaluation of 300000 events: 0.495 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 5.213e+02
: 2 : teta2 : 4.435e+02
: 3 : dSlopeY : 4.414e+02
: 4 : distX : 3.118e+02
: 5 : dSlope : 2.646e+01
: 6 : chi2 : 7.066e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (30000 events)
: Elapsed time for evaluation of 30000 events: 0.0597 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.29449 0.63524 [ -0.99999 0.99994 ]
: teta2: -0.97212 0.070073 [ -1.0000 0.32779 ]
: distX: -0.80346 0.24082 [ -1.0000 0.97553 ]
: distY: -0.87751 0.16136 [ -1.0000 0.95680 ]
: dSlope: -0.50293 0.35312 [ -0.99999 0.86422 ]
: dSlopeY: -0.91903 0.13042 [ -1.0000 0.95486 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.29449 0.63524 [ -0.99999 0.99994 ]
: teta2: -0.97212 0.070073 [ -1.0000 0.32779 ]
: distX: -0.80346 0.24082 [ -1.0000 0.97553 ]
: distY: -0.87751 0.16136 [ -1.0000 0.95680 ]
: dSlope: -0.50293 0.35312 [ -0.99999 0.86422 ]
: dSlopeY: -0.91903 0.13042 [ -1.0000 0.95486 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.958
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.450 (0.414) 0.886 (0.882) 0.980 (0.979)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 30000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 300000 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

268
outputs_nn/output_B_res.txt

@@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 7718 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 11895204 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=0:nTrain_Background=20000.0:nTest_Signal=1000.0:nTest_Background=5000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "0" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "1000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "20000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "5000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 6718
: Signal -- testing events : 1000
: Signal -- training and testing events: 7718
: Background -- training events : 20000
: Background -- testing events : 5000
: Background -- training and testing events: 25000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.083 +0.248 +0.242 +0.206 +0.042
: teta2: -0.083 +1.000 +0.038 +0.508 +0.191 +0.637
: distX: +0.248 +0.038 +1.000 -0.175 +0.681 +0.107
: distY: +0.242 +0.508 -0.175 +1.000 +0.349 +0.484
: dSlope: +0.206 +0.191 +0.681 +0.349 +1.000 +0.349
: dSlopeY: +0.042 +0.637 +0.107 +0.484 +0.349 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.024 +0.242 +0.209 +0.046 +0.055
: teta2: -0.024 +1.000 +0.245 +0.652 +0.371 +0.483
: distX: +0.242 +0.245 +1.000 +0.017 +0.776 +0.198
: distY: +0.209 +0.652 +0.017 +1.000 +0.312 +0.554
: dSlope: +0.046 +0.371 +0.776 +0.312 +1.000 +0.392
: dSlopeY: +0.055 +0.483 +0.198 +0.554 +0.392 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 14.879 7.6783 [ 0.35410 29.998 ]
: teta2: 0.0053594 0.015677 [ 5.5206e-06 0.34331 ]
: distX: 74.975 63.347 [ 0.00024414 487.68 ]
: distY: 35.490 43.750 [ 8.3923e-05 497.42 ]
: dSlope: 0.35788 0.24459 [ 6.4602e-05 1.2881 ]
: dSlopeY: 0.0073112 0.012369 [ 3.9814e-08 0.14883 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 9.921e-02
: 2 : distY : 8.773e-02
: 3 : dSlopeY : 2.784e-02
: 4 : teta2 : 2.748e-02
: 5 : dSlope : 2.662e-02
: 6 : distX : 1.420e-02
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.020078 0.51803 [ -1.0000 1.0000 ]
: teta2: -0.96881 0.091329 [ -1.0000 1.0000 ]
: distX: -0.69253 0.25979 [ -1.0000 1.0000 ]
: distY: -0.85730 0.17591 [ -1.0000 1.0000 ]
: dSlope: -0.44439 0.37979 [ -1.0000 1.0000 ]
: dSlopeY: -0.90175 0.16622 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 26718 events: 57.7 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (26718 events)
: Elapsed time for evaluation of 26718 events: 0.0346 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 1.467e+02
: 2 : teta2 : 6.884e+01
: 3 : distX : 6.627e+01
: 4 : dSlopeY : 3.066e+01
: 5 : dSlope : 1.175e+01
: 6 : chi2 : 2.632e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (6000 events)
: Elapsed time for evaluation of 6000 events: 0.0118 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 0.10881 0.51711 [ -1.0020 0.99902 ]
: teta2: -0.96093 0.10865 [ -0.99988 0.46950 ]
: distX: -0.67673 0.27337 [ -0.99968 0.75285 ]
: distY: -0.82663 0.20236 [ -0.99997 0.83868 ]
: dSlope: -0.46394 0.38477 [ -0.99839 0.97924 ]
: dSlopeY: -0.89235 0.16561 [ -1.0000 0.93883 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 0.10881 0.51711 [ -1.0020 0.99902 ]
: teta2: -0.96093 0.10865 [ -0.99988 0.46950 ]
: distX: -0.67673 0.27337 [ -0.99968 0.75285 ]
: distY: -0.82663 0.20236 [ -0.99997 0.83868 ]
: dSlope: -0.46394 0.38477 [ -0.99839 0.97924 ]
: dSlopeY: -0.89235 0.16561 [ -1.0000 0.93883 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.842
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.075 (0.082) 0.476 (0.467) 0.841 (0.828)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 6000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 26718 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

0
outputs_nn/output_D.txt

268
outputs_nn/output_D_res.txt

@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 8286 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 12762964 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=0:nTrain_Background=20000.0:nTest_Signal=1000.0:nTest_Background=5000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "0" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "1000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "20000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "5000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 7286
: Signal -- testing events : 1000
: Signal -- training and testing events: 8286
: Background -- training events : 20000
: Background -- testing events : 5000
: Background -- training and testing events: 25000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.090 +0.190 +0.270 +0.150 +0.032
: teta2: -0.090 +1.000 +0.022 +0.557 +0.231 +0.681
: distX: +0.190 +0.022 +1.000 -0.243 +0.667 +0.066
: distY: +0.270 +0.557 -0.243 +1.000 +0.299 +0.491
: dSlope: +0.150 +0.231 +0.667 +0.299 +1.000 +0.343
: dSlopeY: +0.032 +0.681 +0.066 +0.491 +0.343 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.032 +0.249 +0.208 +0.048 +0.047
: teta2: -0.032 +1.000 +0.256 +0.643 +0.377 +0.464
: distX: +0.249 +0.256 +1.000 +0.027 +0.771 +0.192
: distY: +0.208 +0.643 +0.027 +1.000 +0.323 +0.556
: dSlope: +0.048 +0.377 +0.771 +0.323 +1.000 +0.394
: dSlopeY: +0.047 +0.464 +0.192 +0.556 +0.394 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 15.110 7.5957 [ 0.25759 29.998 ]
: teta2: 0.0049007 0.015613 [ 1.1810e-05 0.34609 ]
: distX: 77.540 64.030 [ 0.00059319 494.45 ]
: distY: 35.596 43.128 [ 0.0016556 497.11 ]
: dSlope: 0.37313 0.24282 [ 0.00012810 1.2803 ]
: dSlopeY: 0.0071048 0.011434 [ 4.9639e-07 0.14679 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 8.701e-02
: 2 : distY : 7.455e-02
: 3 : dSlope : 6.957e-02
: 4 : teta2 : 4.316e-02
: 5 : dSlopeY : 2.562e-02
: 6 : distX : 1.371e-02
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.0011851 0.51079 [ -1.0000 1.0000 ]
: teta2: -0.97175 0.090226 [ -1.0000 1.0000 ]
: distX: -0.68636 0.25900 [ -1.0000 1.0000 ]
: distY: -0.85679 0.17352 [ -1.0000 1.0000 ]
: dSlope: -0.41728 0.37935 [ -1.0000 1.0000 ]
: dSlopeY: -0.90320 0.15579 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 27286 events: 59.2 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (27286 events)
: Elapsed time for evaluation of 27286 events: 0.0331 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 1.487e+02
: 2 : distX : 9.251e+01
: 3 : dSlopeY : 5.612e+01
: 4 : teta2 : 3.951e+01
: 5 : dSlope : 1.219e+01
: 6 : chi2 : 1.428e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (6000 events)
: Elapsed time for evaluation of 6000 events: 0.0113 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 0.10129 0.51080 [ -0.98564 0.99991 ]
: teta2: -0.96473 0.096760 [ -0.99997 0.43123 ]
: distX: -0.68127 0.26859 [ -0.99983 0.92711 ]
: distY: -0.83124 0.20417 [ -0.99994 1.0115 ]
: dSlope: -0.45660 0.39080 [ -0.99695 0.96415 ]
: dSlopeY: -0.89629 0.16201 [ -0.99999 1.0015 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 0.10129 0.51080 [ -0.98564 0.99991 ]
: teta2: -0.96473 0.096760 [ -0.99997 0.43123 ]
: distX: -0.68127 0.26859 [ -0.99983 0.92711 ]
: distY: -0.83124 0.20417 [ -0.99994 1.0115 ]
: dSlope: -0.45660 0.39080 [ -0.99695 0.96415 ]
: dSlopeY: -0.89629 0.16201 [ -0.99999 1.0015 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.854
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.091 (0.089) 0.501 (0.494) 0.851 (0.854)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 6000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 27286 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

268
outputs_nn/output_both.txt

@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 13829 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 29144752 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=0:nTrain_Background=20000.0:nTest_Signal=2000.0:nTest_Background=5000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "0" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "2000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "20000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "5000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 11829
: Signal -- testing events : 2000
: Signal -- training and testing events: 13829
: Background -- training events : 20000
: Background -- testing events : 5000
: Background -- training and testing events: 25000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.082 +0.200 +0.302 +0.182 +0.049
: teta2: -0.082 +1.000 +0.033 +0.461 +0.179 +0.632
: distX: +0.200 +0.033 +1.000 -0.222 +0.685 +0.075
: distY: +0.302 +0.461 -0.222 +1.000 +0.306 +0.463
: dSlope: +0.182 +0.179 +0.685 +0.306 +1.000 +0.319
: dSlopeY: +0.049 +0.632 +0.075 +0.463 +0.319 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.003 +0.368 +0.313 -0.005 +0.094
: teta2: -0.003 +1.000 +0.215 +0.617 +0.302 +0.491
: distX: +0.368 +0.215 +1.000 +0.065 +0.633 +0.203
: distY: +0.313 +0.617 +0.065 +1.000 +0.246 +0.532
: dSlope: -0.005 +0.302 +0.633 +0.246 +1.000 +0.356
: dSlopeY: +0.094 +0.491 +0.203 +0.532 +0.356 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 13.817 7.9796 [ 0.0011579 29.997 ]
: teta2: 0.0040130 0.012209 [ 1.9755e-06 0.23492 ]
: distX: 71.018 61.492 [ 0.0031776 478.62 ]
: distY: 31.234 37.327 [ 0.00019073 497.26 ]
: dSlope: 0.37346 0.23976 [ 5.9959e-05 1.2822 ]
: dSlopeY: 0.0063004 0.010258 [ 3.9814e-08 0.14883 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 9.147e-02
: 2 : distY : 5.407e-02
: 3 : teta2 : 4.044e-02
: 4 : dSlope : 3.233e-02
: 5 : distX : 2.801e-02
: 6 : dSlopeY : 1.699e-02
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.078822 0.53204 [ -1.0000 1.0000 ]
: teta2: -0.96585 0.10395 [ -1.0000 1.0000 ]
: distX: -0.70325 0.25696 [ -1.0000 1.0000 ]
: distY: -0.87438 0.15013 [ -1.0000 1.0000 ]
: dSlope: -0.41755 0.37399 [ -1.0000 1.0000 ]
: dSlopeY: -0.91533 0.13785 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 31829 events: 64.5 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (31829 events)
: Elapsed time for evaluation of 31829 events: 0.0391 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 3.588e+02
: 2 : dSlopeY : 2.134e+02
: 3 : distX : 1.426e+02
: 4 : teta2 : 7.020e+01
: 5 : dSlope : 1.303e+01
: 6 : chi2 : 3.098e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (7000 events)
: Elapsed time for evaluation of 7000 events: 0.0138 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.055433 0.55630 [ -0.99875 1.0001 ]
: teta2: -0.96118 0.10498 [ -0.99999 0.45981 ]
: distX: -0.71039 0.26310 [ -0.99989 0.79697 ]
: distY: -0.86095 0.16028 [ -1.0000 0.89878 ]
: dSlope: -0.43538 0.38054 [ -0.99815 0.98969 ]
: dSlopeY: -0.91076 0.14080 [ -1.0000 0.93883 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.055433 0.55630 [ -0.99875 1.0001 ]
: teta2: -0.96118 0.10498 [ -0.99999 0.45981 ]
: distX: -0.71039 0.26310 [ -0.99989 0.79697 ]
: distY: -0.86095 0.16028 [ -1.0000 0.89878 ]
: dSlope: -0.43538 0.38054 [ -0.99815 0.98969 ]
: dSlopeY: -0.91076 0.14080 [ -1.0000 0.93883 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.853
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.000 (0.000) 0.470 (0.511) 0.877 (0.882)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 7000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 31829 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

268
outputs_nn/output_e_B.txt

@@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 187767 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 14040318 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=50000.0:nTrain_Background=500000.0:nTest_Signal=20000.0:nTest_Background=100000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "50000" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "20000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "500000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "100000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 50000
: Signal -- testing events : 20000
: Signal -- training and testing events: 70000
: Background -- training events : 500000
: Background -- testing events : 100000
: Background -- training and testing events: 600000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.094 +0.511 +0.560 +0.392 +0.141
: teta2: -0.094 +1.000 -0.010 +0.336 -0.009 +0.390
: distX: +0.511 -0.010 +1.000 +0.200 +0.501 +0.229
: distY: +0.560 +0.336 +0.200 +1.000 +0.505 +0.456
: dSlope: +0.392 -0.009 +0.501 +0.505 +1.000 +0.494
: dSlopeY: +0.141 +0.390 +0.229 +0.456 +0.494 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 +0.006 +0.360 +0.312 -0.004 +0.103
: teta2: +0.006 +1.000 +0.218 +0.626 +0.297 +0.487
: distX: +0.360 +0.218 +1.000 +0.065 +0.633 +0.205
: distY: +0.312 +0.626 +0.065 +1.000 +0.250 +0.538
: dSlope: -0.004 +0.297 +0.633 +0.250 +1.000 +0.358
: dSlopeY: +0.103 +0.487 +0.205 +0.538 +0.358 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 8.4488 9.2446 [ 5.2334e-05 30.000 ]
: teta2: 0.0057495 0.014113 [ 1.2564e-06 0.42331 ]
: distX: 40.154 55.148 [ 4.3869e-05 499.60 ]
: distY: 26.206 36.751 [ 1.9073e-06 499.20 ]
: dSlope: 0.33045 0.23497 [ 4.7125e-07 1.3693 ]
: dSlopeY: 0.0054210 0.0091700 [ 1.0245e-08 0.14939 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 5.701e-01
: 2 : distX : 3.731e-01
: 3 : distY : 2.108e-01
: 4 : dSlopeY : 8.367e-02
: 5 : dSlope : 8.157e-03
: 6 : teta2 : 3.280e-03
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.43675 0.61631 [ -1.0000 1.0000 ]
: teta2: -0.97284 0.066677 [ -1.0000 1.0000 ]
: distX: -0.83926 0.22077 [ -1.0000 1.0000 ]
: distY: -0.89501 0.14724 [ -1.0000 1.0000 ]
: dSlope: -0.51734 0.34321 [ -1.0000 1.0000 ]
: dSlopeY: -0.92743 0.12276 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 550000 events: 1.28e+03 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (550000 events)
: Elapsed time for evaluation of 550000 events: 0.743 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 3.418e+02
: 2 : dSlopeY : 2.969e+02
: 3 : teta2 : 2.257e+02
: 4 : distX : 2.089e+02
: 5 : dSlope : 1.525e+01
: 6 : chi2 : 3.953e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (120000 events)
: Elapsed time for evaluation of 120000 events: 0.165 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.15704 0.62353 [ -1.0000 0.99999 ]
: teta2: -0.97379 0.069648 [ -0.99999 0.49021 ]
: distX: -0.76831 0.25194 [ -1.0000 0.99861 ]
: distY: -0.86100 0.17259 [ -1.0000 0.99055 ]
: dSlope: -0.50135 0.35288 [ -1.0000 0.94915 ]
: dSlopeY: -0.91297 0.13474 [ -1.0000 0.99972 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.15704 0.62353 [ -1.0000 0.99999 ]
: teta2: -0.97379 0.069648 [ -0.99999 0.49021 ]
: distX: -0.76831 0.25194 [ -1.0000 0.99861 ]
: distY: -0.86100 0.17259 [ -1.0000 0.99055 ]
: dSlope: -0.50135 0.35288 [ -1.0000 0.94915 ]
: dSlopeY: -0.91297 0.13474 [ -1.0000 0.99972 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.958
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.434 (0.435) 0.889 (0.888) 0.982 (0.982)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 120000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 550000 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

280
outputs_nn/output_n_B.txt

@@ -1,280 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 2175608 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 14040318 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=50000.0:nTrain_Background=500000.0:nTest_Signal=20000.0:nTest_Background=100000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "50000" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "20000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "500000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "100000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
: Dataset[MatchNNDataSet] : Signal requirement: "chi2<15 && distX<250 && distY<400 && dSlope<1.5 && dSlopeY<0.15"
: Dataset[MatchNNDataSet] : Signal -- number of events passed: 2151182 / sum of weights: 2.15118e+06
: Dataset[MatchNNDataSet] : Signal -- efficiency : 0.988773
: Dataset[MatchNNDataSet] : Background requirement: "chi2<15 && distX<250 && distY<400 && dSlope<1.5 && dSlopeY<0.15"
: Dataset[MatchNNDataSet] : Background -- number of events passed: 7175761 / sum of weights: 7.17576e+06
: Dataset[MatchNNDataSet] : Background -- efficiency : 0.511083
: Dataset[MatchNNDataSet] : you have opted for interpreting the requested number of training/testing events
: to be the number of events AFTER your preselection cuts
:
: Dataset[MatchNNDataSet] : you have opted for interpreting the requested number of training/testing events
: to be the number of events AFTER your preselection cuts
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 50000
: Signal -- testing events : 20000
: Signal -- training and testing events: 70000
: Dataset[MatchNNDataSet] : Signal -- due to the preselection a scaling factor has been applied to the numbers of requested events: 0.988773
: Background -- training events : 500000
: Background -- testing events : 100000
: Background -- training and testing events: 600000
: Dataset[MatchNNDataSet] : Background -- due to the preselection a scaling factor has been applied to the numbers of requested events: 0.511083
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 +0.197 +0.512 +0.603 +0.392 +0.418
: teta2: +0.197 +1.000 +0.456 +0.649 +0.399 +0.581
: distX: +0.512 +0.456 +1.000 +0.445 +0.555 +0.606
: distY: +0.603 +0.649 +0.445 +1.000 +0.529 +0.568
: dSlope: +0.392 +0.399 +0.555 +0.529 +1.000 +0.647
: dSlopeY: +0.418 +0.581 +0.606 +0.568 +0.647 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 +0.001 +0.370 +0.305 +0.002 +0.084
: teta2: +0.001 +1.000 +0.173 +0.650 +0.280 +0.455
: distX: +0.370 +0.173 +1.000 +0.043 +0.627 +0.195
: distY: +0.305 +0.650 +0.043 +1.000 +0.240 +0.458
: dSlope: +0.002 +0.280 +0.627 +0.240 +1.000 +0.362
: dSlopeY: +0.084 +0.455 +0.195 +0.458 +0.362 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 4.1539 4.6460 [ 1.3264e-05 15.000 ]
: teta2: 0.0079252 0.017224 [ 1.2100e-06 0.43619 ]
: distX: 27.109 38.586 [ 3.8147e-06 250.00 ]
: distY: 20.564 28.494 [ 1.5259e-05 399.49 ]
: dSlope: 0.28782 0.22814 [ 2.2016e-06 1.3026 ]
: dSlopeY: 0.0054782 0.0099926 [ 1.8626e-09 0.14834 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 6.095e-01
: 2 : distX : 4.727e-01
: 3 : distY : 1.428e-01
: 4 : dSlope : 7.613e-02
: 5 : dSlopeY : 5.967e-02
: 6 : teta2 : 5.937e-02
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.44615 0.61947 [ -1.0000 1.0000 ]
: teta2: -0.96367 0.078975 [ -1.0000 1.0000 ]
: distX: -0.78313 0.30869 [ -1.0000 1.0000 ]
: distY: -0.89705 0.14265 [ -1.0000 1.0000 ]
: dSlope: -0.55809 0.35029 [ -1.0000 1.0000 ]
: dSlopeY: -0.92614 0.13472 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 550000 events: 1.28e+03 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (550000 events)
: Elapsed time for evaluation of 550000 events: 0.785 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : teta2 : 7.346e+02
: 2 : distX : 1.891e+02
: 3 : dSlopeY : 5.900e+01
: 4 : distY : 4.639e+01
: 5 : dSlope : 1.074e+01
: 6 : chi2 : 2.093e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (120000 events)
: Elapsed time for evaluation of 120000 events: 0.169 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.15813 0.62626 [ -1.0000 0.99995 ]
: teta2: -0.97140 0.071651 [ -1.0000 0.85686 ]
: distX: -0.67477 0.35263 [ -1.0000 0.99866 ]
: distY: -0.87476 0.15583 [ -1.0000 0.99415 ]
: dSlope: -0.49635 0.36886 [ -0.99993 0.97372 ]
: dSlopeY: -0.91980 0.13029 [ -1.0000 1.0219 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.15813 0.62626 [ -1.0000 0.99995 ]
: teta2: -0.97140 0.071651 [ -1.0000 0.85686 ]
: distX: -0.67477 0.35263 [ -1.0000 0.99866 ]
: distY: -0.87476 0.15583 [ -1.0000 0.99415 ]
: dSlope: -0.49635 0.36886 [ -0.99993 0.97372 ]
: dSlopeY: -0.91980 0.13029 [ -1.0000 1.0219 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.970
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.543 (0.551) 0.936 (0.936) 0.985 (0.985)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 120000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 550000 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming neural_net_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

268
outputs_nn/output_og_weights_B.txt

@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 6590 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 14040318 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=0:nTrain_Background=200000.0:nTest_Signal=1000.0:nTest_Background=50000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "0" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "1000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "200000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "50000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 5590
: Signal -- testing events : 1000
: Signal -- training and testing events: 6590
: Background -- training events : 200000
: Background -- testing events : 50000
: Background -- training and testing events: 250000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.083 +0.225 +0.287 +0.211 +0.054
: teta2: -0.083 +1.000 +0.035 +0.472 +0.174 +0.617
: distX: +0.225 +0.035 +1.000 -0.194 +0.684 +0.087
: distY: +0.287 +0.472 -0.194 +1.000 +0.330 +0.471
: dSlope: +0.211 +0.174 +0.684 +0.330 +1.000 +0.325
: dSlopeY: +0.054 +0.617 +0.087 +0.471 +0.325 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 +0.003 +0.359 +0.315 -0.004 +0.101
: teta2: +0.003 +1.000 +0.212 +0.622 +0.296 +0.492
: distX: +0.359 +0.212 +1.000 +0.060 +0.635 +0.204
: distY: +0.315 +0.622 +0.060 +1.000 +0.246 +0.530
: dSlope: -0.004 +0.296 +0.635 +0.246 +1.000 +0.360
: dSlopeY: +0.101 +0.492 +0.204 +0.530 +0.360 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 13.730 8.0164 [ 0.00031556 30.000 ]
: teta2: 0.0041449 0.012655 [ 1.1428e-06 0.43138 ]
: distX: 69.832 60.841 [ 0.00027466 490.80 ]
: distY: 31.145 37.661 [ 0.00010300 497.14 ]
: dSlope: 0.36688 0.24104 [ 1.2597e-05 1.3582 ]
: dSlopeY: 0.0063738 0.010662 [ 4.9360e-08 0.14883 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 8.858e-02
: 2 : distY : 5.736e-02
: 3 : teta2 : 3.110e-02
: 4 : distX : 2.441e-02
: 5 : dSlope : 2.026e-02
: 6 : dSlopeY : 1.556e-02
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.084705 0.53444 [ -1.0000 1.0000 ]
: teta2: -0.98079 0.058673 [ -1.0000 1.0000 ]
: distX: -0.71544 0.24793 [ -1.0000 1.0000 ]
: distY: -0.87470 0.15151 [ -1.0000 1.0000 ]
: dSlope: -0.45977 0.35494 [ -1.0000 1.0000 ]
: dSlopeY: -0.91435 0.14328 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 205590 events: 465 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (205590 events)
: Elapsed time for evaluation of 205590 events: 0.252 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 2.139e+02
: 2 : teta2 : 1.005e+02
: 3 : dSlopeY : 9.191e+01
: 4 : distX : 8.898e+01
: 5 : dSlope : 1.082e+01
: 6 : chi2 : 1.776e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (51000 events)
: Elapsed time for evaluation of 51000 events: 0.0702 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.011828 0.57705 [ -0.99996 0.99998 ]
: teta2: -0.97507 0.067138 [ -0.99998 0.27868 ]
: distX: -0.72636 0.26123 [ -1.0000 0.90538 ]
: distY: -0.84283 0.18429 [ -0.99999 1.0037 ]
: dSlope: -0.48676 0.36013 [ -0.99980 0.87659 ]
: dSlopeY: -0.90653 0.13847 [ -1.0000 1.0030 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.011828 0.57705 [ -0.99996 0.99998 ]
: teta2: -0.97507 0.067138 [ -0.99998 0.27868 ]
: distX: -0.72636 0.26123 [ -1.0000 0.90538 ]
: distY: -0.84283 0.18429 [ -0.99999 1.0037 ]
: dSlope: -0.48676 0.36013 [ -0.99980 0.87659 ]
: dSlopeY: -0.90653 0.13847 [ -1.0000 1.0030 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.850
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.050 (0.050) 0.446 (0.447) 0.869 (0.869)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 51000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 205590 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

268
outputs_nn/output_og_weights_res_bkg_B.txt

@ -1,268 +0,0 @@
: Parsing option string:
: ... "V:!Silent:Color:DrawProgressBar:AnalysisType=Classification"
: The following options are set:
: - By User:
: V: "True" [Verbose flag]
: Color: "True" [Flag for coloured screen output (default: True, if in batch mode: False)]
: Silent: "False" [Batch mode: boolean silent flag inhibiting any output from TMVA after the creation of the factory class object (default: False)]
: DrawProgressBar: "True" [Draw progress bar to display training, testing and evaluation schedule (default: True)]
: AnalysisType: "Classification" [Set the analysis type (Classification, Regression, Multiclass, Auto) (default: Auto)]
: - Default:
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Transformations: "I" [List of transformations to test; formatting example: "Transformations=I;D;P;U;G,D", for identity, decorrelation, PCA, Uniform and Gaussianisation followed by decorrelation transformations]
: Correlations: "False" [boolean to show correlation in output]
: ROC: "True" [boolean to show ROC in output]
: ModelPersistence: "True" [Option to save the trained model in xml file or using serialization]
DataSetInfo : [MatchNNDataSet] : Added class "Signal"
: Add Tree Signal of type Signal with 6590 events
DataSetInfo : [MatchNNDataSet] : Added class "Background"
: Add Tree Bkg of type Background with 10981310 events
: Dataset[MatchNNDataSet] : Class index : 0 name : Signal
: Dataset[MatchNNDataSet] : Class index : 1 name : Background
Factory : Booking method: matching_mlp
:
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: <none>
: - Default:
: Boost_num: "0" [Number of times the classifier will be boosted]
: Parsing option string:
: ... "!H:V:TrainingMethod=BP:NeuronType=ReLU:EstimatorType=CE:VarTransform=Norm:NCycles=700:HiddenLayers=N+2,N:TestRate=50:Sampling=1.0:SamplingImportance=1.0:LearningRate=0.02:DecayRate=0.01:!UseRegulator"
: The following options are set:
: - By User:
: NCycles: "700" [Number of training cycles]
: HiddenLayers: "N+2,N" [Specification of hidden layer architecture]
: NeuronType: "ReLU" [Neuron activation function type]
: EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood]
: V: "True" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
: VarTransform: "Norm" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
: H: "False" [Print method-specific help message]
: TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
: LearningRate: "2.000000e-02" [ANN learning rate parameter]
: DecayRate: "1.000000e-02" [Decay rate for learning parameter]
: TestRate: "50" [Test for overtraining performed at each #th epochs]
: Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
: SamplingImportance: "1.000000e+00" [ The sampling weights of events in epochs which successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
: UseRegulator: "False" [Use regulator to avoid over-training]
: - Default:
: RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
: NeuronInputType: "sum" [Neuron input function type]
: VerbosityLevel: "Default" [Verbosity level]
: CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
: IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
: EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
: SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
: SamplingTraining: "True" [The training sample is sampled]
: SamplingTesting: "False" [The testing sample is sampled]
: ResetStep: "50" [How often BFGS should reset history]
: Tau: "3.000000e+00" [LineSearch "size step"]
: BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
: BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
: ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
: ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
: UpdateLimit: "10000" [Maximum times of regulator update]
: CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
: WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
matching_mlp : [MatchNNDataSet] : Create Transformation "Norm" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
matching_mlp : Building Network.
: Initializing weights
Factory : Train all methods
: Rebuilding Dataset MatchNNDataSet
: Parsing option string:
: ... "SplitMode=random:V:nTrain_Signal=0:nTrain_Background=200000.0:nTest_Signal=5000.0:nTest_Background=50000.0"
: The following options are set:
: - By User:
: SplitMode: "Random" [Method of picking training and testing events (default: random)]
: nTrain_Signal: "0" [Number of training events of class Signal (default: 0 = all)]
: nTest_Signal: "5000" [Number of test events of class Signal (default: 0 = all)]
: nTrain_Background: "200000" [Number of training events of class Background (default: 0 = all)]
: nTest_Background: "50000" [Number of test events of class Background (default: 0 = all)]
: V: "True" [Verbosity (default: true)]
: - Default:
: MixMode: "SameAsSplitMode" [Method of mixing events of different classes into one dataset (default: SameAsSplitMode)]
: SplitSeed: "100" [Seed for random event shuffling]
: NormMode: "EqualNumEvents" [Overall renormalisation of event-by-event weights used in the training (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)]
: ScaleWithPreselEff: "False" [Scale the number of requested events by the eff. of the preselection cuts (or not)]
: TrainTestSplit_Signal: "0.000000e+00" [Number of test events of class Signal (default: 0 = all)]
: TrainTestSplit_Background: "0.000000e+00" [Number of test events of class Background (default: 0 = all)]
: VerboseLevel: "Info" [VerboseLevel (Debug/Verbose/Info)]
: Correlations: "True" [Boolean to show correlation output (Default: true)]
: CalcCorrelations: "True" [Compute correlations and also some variable statistics, e.g. min/max (Default: true )]
: Building event vectors for type 2 Signal
: Dataset[MatchNNDataSet] : create input formulas for tree Signal
: Building event vectors for type 2 Background
: Dataset[MatchNNDataSet] : create input formulas for tree Bkg
DataSetFactory : [MatchNNDataSet] : Number of events in input trees
:
:
: Dataset[MatchNNDataSet] : Weight renormalisation mode: "EqualNumEvents": renormalises all event classes ...
: Dataset[MatchNNDataSet] : such that the effective (weighted) number of events in each class is the same
: Dataset[MatchNNDataSet] : (and equals the number of events (entries) given for class=0 )
: Dataset[MatchNNDataSet] : ... i.e. such that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ...
: Dataset[MatchNNDataSet] : ... (note that N_j is the sum of TRAINING events
: Dataset[MatchNNDataSet] : ..... Testing events are not renormalised nor included in the renormalisation factor!)
: Number of training and testing events
: ---------------------------------------------------------------------------
: Signal -- training events : 1590
: Signal -- testing events : 5000
: Signal -- training and testing events: 6590
: Background -- training events : 200000
: Background -- testing events : 50000
: Background -- training and testing events: 250000
:
DataSetInfo : Correlation matrix (Signal):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.090 +0.192 +0.272 +0.184 +0.049
: teta2: -0.090 +1.000 +0.041 +0.483 +0.208 +0.628
: distX: +0.192 +0.041 +1.000 -0.179 +0.680 +0.101
: distY: +0.272 +0.483 -0.179 +1.000 +0.363 +0.496
: dSlope: +0.184 +0.208 +0.680 +0.363 +1.000 +0.350
: dSlopeY: +0.049 +0.628 +0.101 +0.496 +0.350 +1.000
: --------------------------------------------------------
DataSetInfo : Correlation matrix (Background):
: --------------------------------------------------------
: chi2 teta2 distX distY dSlope dSlopeY
: chi2: +1.000 -0.018 +0.249 +0.222 +0.061 +0.061
: teta2: -0.018 +1.000 +0.219 +0.658 +0.336 +0.485
: distX: +0.249 +0.219 +1.000 -0.003 +0.782 +0.189
: distY: +0.222 +0.658 -0.003 +1.000 +0.284 +0.551
: dSlope: +0.061 +0.336 +0.782 +0.284 +1.000 +0.379
: dSlopeY: +0.061 +0.485 +0.189 +0.551 +0.379 +1.000
: --------------------------------------------------------
DataSetFactory : [MatchNNDataSet] :
:
Factory : [MatchNNDataSet] : Create Transformation "I" with events from all classes.
:
: Transformation, Variable selection :
: Input : variable 'chi2' <---> Output : variable 'chi2'
: Input : variable 'teta2' <---> Output : variable 'teta2'
: Input : variable 'distX' <---> Output : variable 'distX'
: Input : variable 'distY' <---> Output : variable 'distY'
: Input : variable 'dSlope' <---> Output : variable 'dSlope'
: Input : variable 'dSlopeY' <---> Output : variable 'dSlopeY'
TFHandler_Factory : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 14.983 7.6381 [ 0.13393 30.000 ]
: teta2: 0.0045002 0.013698 [ 1.0756e-06 0.36197 ]
: distX: 74.269 61.998 [ 0.00010681 495.87 ]
: distY: 33.972 40.646 [ 0.00016022 498.46 ]
: dSlope: 0.35639 0.24245 [ 9.2713e-06 1.3376 ]
: dSlopeY: 0.0069454 0.011901 [ 8.8476e-08 0.14989 ]
: -----------------------------------------------------------
: Ranking input variables (method unspecific)...
IdTransformation : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Separation
: --------------------------------
: 1 : chi2 : 9.899e-02
: 2 : distY : 8.544e-02
: 3 : teta2 : 4.508e-02
: 4 : dSlope : 3.745e-02
: 5 : dSlopeY : 2.733e-02
: 6 : distX : 1.657e-02
: --------------------------------
Factory : Train method: matching_mlp for Classification
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: -0.0056167 0.51149 [ -1.0000 1.0000 ]
: teta2: -0.97514 0.075684 [ -1.0000 1.0000 ]
: distX: -0.70045 0.25006 [ -1.0000 1.0000 ]
: distY: -0.86369 0.16308 [ -1.0000 1.0000 ]
: dSlope: -0.46715 0.36251 [ -1.0000 1.0000 ]
: dSlopeY: -0.90733 0.15880 [ -1.0000 1.0000 ]
: -----------------------------------------------------------
: Training Network
:
: Elapsed time for training with 201590 events: 424 sec
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on training sample (201590 events)
: Elapsed time for evaluation of 201590 events: 0.244 sec
: Creating xml weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
: Creating standalone class: MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C
: Write special histos to file: matching_ghost_mlp_training.root:/MatchNNDataSet/Method_MLP/matching_mlp
Factory : Training finished
:
: Ranking input variables (method specific)...
matching_mlp : Ranking result (top variable is best ranked)
: --------------------------------
: Rank : Variable : Importance
: --------------------------------
: 1 : distY : 7.131e+01
: 2 : teta2 : 3.522e+01
: 3 : distX : 2.316e+01
: 4 : dSlopeY : 1.020e+01
: 5 : dSlope : 6.822e+00
: 6 : chi2 : 2.546e+00
: --------------------------------
Factory : === Destroy and recreate all methods via weight files for testing ===
:
: Reading weight file: MatchNNDataSet/weights/TMVAClassification_matching_mlp.weights.xml
matching_mlp : Building Network.
: Initializing weights
Factory : Test all methods
Factory : Test method: matching_mlp for Classification performance
:
matching_mlp : [MatchNNDataSet] : Evaluation of matching_mlp on testing sample (55000 events)
: Elapsed time for evaluation of 55000 events: 0.0744 sec
Factory : Evaluate all methods
Factory : Evaluate classifier: matching_mlp
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 0.12456 0.50663 [ -0.99498 1.0000 ]
: teta2: -0.96870 0.085208 [ -1.0000 0.96194 ]
: distX: -0.68732 0.27168 [ -0.99998 0.99743 ]
: distY: -0.83061 0.19253 [ -1.0000 1.0027 ]
: dSlope: -0.50226 0.36648 [ -0.99996 0.94917 ]
: dSlopeY: -0.90192 0.14156 [ -1.0000 0.98588 ]
: -----------------------------------------------------------
matching_mlp : [MatchNNDataSet] : Loop over test events and fill histograms with classifier response...
:
TFHandler_matching_mlp : Variable Mean RMS [ Min Max ]
: -----------------------------------------------------------
: chi2: 0.12456 0.50663 [ -0.99498 1.0000 ]
: teta2: -0.96870 0.085208 [ -1.0000 0.96194 ]
: distX: -0.68732 0.27168 [ -0.99998 0.99743 ]
: distY: -0.83061 0.19253 [ -1.0000 1.0027 ]
: dSlope: -0.50226 0.36648 [ -0.99996 0.94917 ]
: dSlopeY: -0.90192 0.14156 [ -1.0000 0.98588 ]
: -----------------------------------------------------------
:
: Evaluation results ranked by best signal efficiency and purity (area)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA
: Name: Method: ROC-integ
: MatchNNDataSet matching_mlp : 0.838
: -------------------------------------------------------------------------------------------------------------------
:
: Testing efficiency compared to training efficiency (overtraining check)
: -------------------------------------------------------------------------------------------------------------------
: DataSet MVA Signal efficiency: from test sample (from training sample)
: Name: Method: @B=0.01 @B=0.10 @B=0.30
: -------------------------------------------------------------------------------------------------------------------
: MatchNNDataSet matching_mlp : 0.067 (0.067) 0.460 (0.458) 0.828 (0.824)
: -------------------------------------------------------------------------------------------------------------------
:
Dataset:MatchNNDataSet : Created tree 'TestTree' with 55000 events
:
Dataset:MatchNNDataSet : Created tree 'TrainTree' with 201590 events
:
Factory : Thank you for using TMVA!
: For citation information, please visit: http://tmva.sf.net/citeTMVA.html
Transforming nn_electron_training/result/MatchNNDataSet/weights/TMVAClassification_matching_mlp.class.C ...
Found minimum and maximum values for 6 variables.
Found 3 matrices:
1. fWeightMatrix0to1 with 7 columns and 8 rows
2. fWeightMatrix1to2 with 9 columns and 6 rows
3. fWeightMatrix2to3 with 7 columns and 1 rows

186
test/ghost_data_new_vars.ipynb
File diff suppressed because one or more lines are too long
View File

408
test/ghost_data_test.ipynb
File diff suppressed because one or more lines are too long
View File

Loading…
Cancel
Save